@@ -1,333 +1,332 @@
1 | # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport |
|
1 | # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import errno |
|
9 | import errno | |
10 | import os |
|
10 | import os | |
11 | import re |
|
11 | import re | |
12 | import socket |
|
12 | import socket | |
13 |
|
13 | |||
14 | from mercurial.i18n import _ |
|
14 | from mercurial.i18n import _ | |
15 | from mercurial.pycompat import ( |
|
15 | from mercurial.pycompat import ( | |
16 | getattr, |
|
16 | getattr, | |
17 | open, |
|
17 | open, | |
18 | ) |
|
18 | ) | |
19 | from mercurial import ( |
|
19 | from mercurial import ( | |
20 | encoding, |
|
20 | encoding, | |
21 | error, |
|
21 | error, | |
22 | pycompat, |
|
22 | pycompat, | |
23 | util, |
|
23 | util, | |
24 | ) |
|
24 | ) | |
25 | from mercurial.utils import ( |
|
25 | from mercurial.utils import ( | |
26 | dateutil, |
|
26 | dateutil, | |
27 | procutil, |
|
27 | procutil, | |
28 | ) |
|
28 | ) | |
29 |
|
29 | |||
30 | from . import ( |
|
30 | from . import ( | |
31 | common, |
|
31 | common, | |
32 | cvsps, |
|
32 | cvsps, | |
33 | ) |
|
33 | ) | |
34 |
|
34 | |||
35 | stringio = util.stringio |
|
35 | stringio = util.stringio | |
36 | checktool = common.checktool |
|
36 | checktool = common.checktool | |
37 | commit = common.commit |
|
37 | commit = common.commit | |
38 | converter_source = common.converter_source |
|
38 | converter_source = common.converter_source | |
39 | makedatetimestamp = common.makedatetimestamp |
|
39 | makedatetimestamp = common.makedatetimestamp | |
40 | NoRepo = common.NoRepo |
|
40 | NoRepo = common.NoRepo | |
41 |
|
41 | |||
42 |
|
42 | |||
43 | class convert_cvs(converter_source): |
|
43 | class convert_cvs(converter_source): | |
44 | def __init__(self, ui, repotype, path, revs=None): |
|
44 | def __init__(self, ui, repotype, path, revs=None): | |
45 | super(convert_cvs, self).__init__(ui, repotype, path, revs=revs) |
|
45 | super(convert_cvs, self).__init__(ui, repotype, path, revs=revs) | |
46 |
|
46 | |||
47 | cvs = os.path.join(path, b"CVS") |
|
47 | cvs = os.path.join(path, b"CVS") | |
48 | if not os.path.exists(cvs): |
|
48 | if not os.path.exists(cvs): | |
49 | raise NoRepo(_(b"%s does not look like a CVS checkout") % path) |
|
49 | raise NoRepo(_(b"%s does not look like a CVS checkout") % path) | |
50 |
|
50 | |||
51 | checktool(b'cvs') |
|
51 | checktool(b'cvs') | |
52 |
|
52 | |||
53 | self.changeset = None |
|
53 | self.changeset = None | |
54 | self.files = {} |
|
54 | self.files = {} | |
55 | self.tags = {} |
|
55 | self.tags = {} | |
56 | self.lastbranch = {} |
|
56 | self.lastbranch = {} | |
57 | self.socket = None |
|
57 | self.socket = None | |
58 | self.cvsroot = open(os.path.join(cvs, b"Root"), b'rb').read()[:-1] |
|
58 | self.cvsroot = open(os.path.join(cvs, b"Root"), b'rb').read()[:-1] | |
59 | self.cvsrepo = open(os.path.join(cvs, b"Repository"), b'rb').read()[:-1] |
|
59 | self.cvsrepo = open(os.path.join(cvs, b"Repository"), b'rb').read()[:-1] | |
60 | self.encoding = encoding.encoding |
|
60 | self.encoding = encoding.encoding | |
61 |
|
61 | |||
62 | self._connect() |
|
62 | self._connect() | |
63 |
|
63 | |||
64 | def _parse(self): |
|
64 | def _parse(self): | |
65 | if self.changeset is not None: |
|
65 | if self.changeset is not None: | |
66 | return |
|
66 | return | |
67 | self.changeset = {} |
|
67 | self.changeset = {} | |
68 |
|
68 | |||
69 | maxrev = 0 |
|
69 | maxrev = 0 | |
70 | if self.revs: |
|
70 | if self.revs: | |
71 | if len(self.revs) > 1: |
|
71 | if len(self.revs) > 1: | |
72 | raise error.Abort( |
|
72 | raise error.Abort( | |
73 | _( |
|
73 | _( | |
74 | b'cvs source does not support specifying ' |
|
74 | b'cvs source does not support specifying ' | |
75 | b'multiple revs' |
|
75 | b'multiple revs' | |
76 | ) |
|
76 | ) | |
77 | ) |
|
77 | ) | |
78 | # TODO: handle tags |
|
78 | # TODO: handle tags | |
79 | try: |
|
79 | try: | |
80 | # patchset number? |
|
80 | # patchset number? | |
81 | maxrev = int(self.revs[0]) |
|
81 | maxrev = int(self.revs[0]) | |
82 | except ValueError: |
|
82 | except ValueError: | |
83 | raise error.Abort( |
|
83 | raise error.Abort( | |
84 | _(b'revision %s is not a patchset number') % self.revs[0] |
|
84 | _(b'revision %s is not a patchset number') % self.revs[0] | |
85 | ) |
|
85 | ) | |
86 |
|
86 | |||
87 | d = encoding.getcwd() |
|
87 | d = encoding.getcwd() | |
88 | try: |
|
88 | try: | |
89 | os.chdir(self.path) |
|
89 | os.chdir(self.path) | |
90 |
|
90 | |||
91 | cache = b'update' |
|
91 | cache = b'update' | |
92 | if not self.ui.configbool(b'convert', b'cvsps.cache'): |
|
92 | if not self.ui.configbool(b'convert', b'cvsps.cache'): | |
93 | cache = None |
|
93 | cache = None | |
94 | db = cvsps.createlog(self.ui, cache=cache) |
|
94 | db = cvsps.createlog(self.ui, cache=cache) | |
95 | db = cvsps.createchangeset( |
|
95 | db = cvsps.createchangeset( | |
96 | self.ui, |
|
96 | self.ui, | |
97 | db, |
|
97 | db, | |
98 | fuzz=int(self.ui.config(b'convert', b'cvsps.fuzz')), |
|
98 | fuzz=int(self.ui.config(b'convert', b'cvsps.fuzz')), | |
99 | mergeto=self.ui.config(b'convert', b'cvsps.mergeto'), |
|
99 | mergeto=self.ui.config(b'convert', b'cvsps.mergeto'), | |
100 | mergefrom=self.ui.config(b'convert', b'cvsps.mergefrom'), |
|
100 | mergefrom=self.ui.config(b'convert', b'cvsps.mergefrom'), | |
101 | ) |
|
101 | ) | |
102 |
|
102 | |||
103 | for cs in db: |
|
103 | for cs in db: | |
104 | if maxrev and cs.id > maxrev: |
|
104 | if maxrev and cs.id > maxrev: | |
105 | break |
|
105 | break | |
106 | id = b"%d" % cs.id |
|
106 | id = b"%d" % cs.id | |
107 | cs.author = self.recode(cs.author) |
|
107 | cs.author = self.recode(cs.author) | |
108 | self.lastbranch[cs.branch] = id |
|
108 | self.lastbranch[cs.branch] = id | |
109 | cs.comment = self.recode(cs.comment) |
|
109 | cs.comment = self.recode(cs.comment) | |
110 | if self.ui.configbool(b'convert', b'localtimezone'): |
|
110 | if self.ui.configbool(b'convert', b'localtimezone'): | |
111 | cs.date = makedatetimestamp(cs.date[0]) |
|
111 | cs.date = makedatetimestamp(cs.date[0]) | |
112 | date = dateutil.datestr(cs.date, b'%Y-%m-%d %H:%M:%S %1%2') |
|
112 | date = dateutil.datestr(cs.date, b'%Y-%m-%d %H:%M:%S %1%2') | |
113 | self.tags.update(dict.fromkeys(cs.tags, id)) |
|
113 | self.tags.update(dict.fromkeys(cs.tags, id)) | |
114 |
|
114 | |||
115 | files = {} |
|
115 | files = {} | |
116 | for f in cs.entries: |
|
116 | for f in cs.entries: | |
117 | files[f.file] = b"%s%s" % ( |
|
117 | files[f.file] = b"%s%s" % ( | |
118 | b'.'.join([(b"%d" % x) for x in f.revision]), |
|
118 | b'.'.join([(b"%d" % x) for x in f.revision]), | |
119 | [b'', b'(DEAD)'][f.dead], |
|
119 | [b'', b'(DEAD)'][f.dead], | |
120 | ) |
|
120 | ) | |
121 |
|
121 | |||
122 | # add current commit to set |
|
122 | # add current commit to set | |
123 | c = commit( |
|
123 | c = commit( | |
124 | author=cs.author, |
|
124 | author=cs.author, | |
125 | date=date, |
|
125 | date=date, | |
126 | parents=[(b"%d" % p.id) for p in cs.parents], |
|
126 | parents=[(b"%d" % p.id) for p in cs.parents], | |
127 | desc=cs.comment, |
|
127 | desc=cs.comment, | |
128 | branch=cs.branch or b'', |
|
128 | branch=cs.branch or b'', | |
129 | ) |
|
129 | ) | |
130 | self.changeset[id] = c |
|
130 | self.changeset[id] = c | |
131 | self.files[id] = files |
|
131 | self.files[id] = files | |
132 |
|
132 | |||
133 | self.heads = self.lastbranch.values() |
|
133 | self.heads = self.lastbranch.values() | |
134 | finally: |
|
134 | finally: | |
135 | os.chdir(d) |
|
135 | os.chdir(d) | |
136 |
|
136 | |||
137 | def _connect(self): |
|
137 | def _connect(self): | |
138 | root = self.cvsroot |
|
138 | root = self.cvsroot | |
139 | conntype = None |
|
139 | conntype = None | |
140 | user, host = None, None |
|
140 | user, host = None, None | |
141 | cmd = [b'cvs', b'server'] |
|
141 | cmd = [b'cvs', b'server'] | |
142 |
|
142 | |||
143 | self.ui.status(_(b"connecting to %s\n") % root) |
|
143 | self.ui.status(_(b"connecting to %s\n") % root) | |
144 |
|
144 | |||
145 | if root.startswith(b":pserver:"): |
|
145 | if root.startswith(b":pserver:"): | |
146 | root = root[9:] |
|
146 | root = root[9:] | |
147 | m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:/]*)(?::(\d*))?(.*)', root) |
|
147 | m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:/]*)(?::(\d*))?(.*)', root) | |
148 | if m: |
|
148 | if m: | |
149 | conntype = b"pserver" |
|
149 | conntype = b"pserver" | |
150 | user, passw, serv, port, root = m.groups() |
|
150 | user, passw, serv, port, root = m.groups() | |
151 | if not user: |
|
151 | if not user: | |
152 | user = b"anonymous" |
|
152 | user = b"anonymous" | |
153 | if not port: |
|
153 | if not port: | |
154 | port = 2401 |
|
154 | port = 2401 | |
155 | else: |
|
155 | else: | |
156 | port = int(port) |
|
156 | port = int(port) | |
157 | format0 = b":pserver:%s@%s:%s" % (user, serv, root) |
|
157 | format0 = b":pserver:%s@%s:%s" % (user, serv, root) | |
158 | format1 = b":pserver:%s@%s:%d%s" % (user, serv, port, root) |
|
158 | format1 = b":pserver:%s@%s:%d%s" % (user, serv, port, root) | |
159 |
|
159 | |||
160 | if not passw: |
|
160 | if not passw: | |
161 | passw = b"A" |
|
161 | passw = b"A" | |
162 | cvspass = os.path.expanduser(b"~/.cvspass") |
|
162 | cvspass = os.path.expanduser(b"~/.cvspass") | |
163 | try: |
|
163 | try: | |
164 | pf = open(cvspass, b'rb') |
|
164 | pf = open(cvspass, b'rb') | |
165 | for line in pf.read().splitlines(): |
|
165 | for line in pf.read().splitlines(): | |
166 | part1, part2 = line.split(b' ', 1) |
|
166 | part1, part2 = line.split(b' ', 1) | |
167 | # /1 :pserver:user@example.com:2401/cvsroot/foo |
|
167 | # /1 :pserver:user@example.com:2401/cvsroot/foo | |
168 | # Ah<Z |
|
168 | # Ah<Z | |
169 | if part1 == b'/1': |
|
169 | if part1 == b'/1': | |
170 | part1, part2 = part2.split(b' ', 1) |
|
170 | part1, part2 = part2.split(b' ', 1) | |
171 | format = format1 |
|
171 | format = format1 | |
172 | # :pserver:user@example.com:/cvsroot/foo Ah<Z |
|
172 | # :pserver:user@example.com:/cvsroot/foo Ah<Z | |
173 | else: |
|
173 | else: | |
174 | format = format0 |
|
174 | format = format0 | |
175 | if part1 == format: |
|
175 | if part1 == format: | |
176 | passw = part2 |
|
176 | passw = part2 | |
177 | break |
|
177 | break | |
178 | pf.close() |
|
178 | pf.close() | |
179 | except IOError as inst: |
|
179 | except IOError as inst: | |
180 | if inst.errno != errno.ENOENT: |
|
180 | if inst.errno != errno.ENOENT: | |
181 | if not getattr(inst, 'filename', None): |
|
181 | if not getattr(inst, 'filename', None): | |
182 | inst.filename = cvspass |
|
182 | inst.filename = cvspass | |
183 | raise |
|
183 | raise | |
184 |
|
184 | |||
185 | sck = socket.socket() |
|
185 | sck = socket.socket() | |
186 | sck.connect((serv, port)) |
|
186 | sck.connect((serv, port)) | |
187 | sck.send( |
|
187 | sck.send( | |
188 | b"\n".join( |
|
188 | b"\n".join( | |
189 | [ |
|
189 | [ | |
190 | b"BEGIN AUTH REQUEST", |
|
190 | b"BEGIN AUTH REQUEST", | |
191 | root, |
|
191 | root, | |
192 | user, |
|
192 | user, | |
193 | passw, |
|
193 | passw, | |
194 | b"END AUTH REQUEST", |
|
194 | b"END AUTH REQUEST", | |
195 | b"", |
|
195 | b"", | |
196 | ] |
|
196 | ] | |
197 | ) |
|
197 | ) | |
198 | ) |
|
198 | ) | |
199 | if sck.recv(128) != b"I LOVE YOU\n": |
|
199 | if sck.recv(128) != b"I LOVE YOU\n": | |
200 | raise error.Abort(_(b"CVS pserver authentication failed")) |
|
200 | raise error.Abort(_(b"CVS pserver authentication failed")) | |
201 |
|
201 | |||
202 | self.writep = self.readp = sck.makefile(b'r+') |
|
202 | self.writep = self.readp = sck.makefile(b'r+') | |
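An aside on the pserver branch above: once the b":pserver:" prefix is stripped, a single regular expression carves the CVSROOT into user, password, host, port and repository path, and _connect() then falls back to the anonymous user and port 2401 when parts are missing. A minimal standalone sketch of that split, using str literals and an invented example root (host, port and path here are illustrative, not taken from the code above):

    import re

    # ':pserver:' prefix already stripped, as in _connect() above
    root = 'anonymous@cvs.example.org:2401/cvsroot/foo'
    m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:/]*)(?::(\d*))?(.*)', root)
    user, passw, serv, port, path = m.groups()
    # -> ('anonymous', None, 'cvs.example.org', '2401', '/cvsroot/foo')
    # _connect() then converts the port to int and looks up the password in ~/.cvspass.
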
203 |
|
203 | |||
204 | if not conntype and root.startswith(b":local:"): |
|
204 | if not conntype and root.startswith(b":local:"): | |
205 | conntype = b"local" |
|
205 | conntype = b"local" | |
206 | root = root[7:] |
|
206 | root = root[7:] | |
207 |
|
207 | |||
208 | if not conntype: |
|
208 | if not conntype: | |
209 | # :ext:user@host/home/user/path/to/cvsroot |
|
209 | # :ext:user@host/home/user/path/to/cvsroot | |
210 | if root.startswith(b":ext:"): |
|
210 | if root.startswith(b":ext:"): | |
211 | root = root[5:] |
|
211 | root = root[5:] | |
212 | m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root) |
|
212 | m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root) | |
213 | # Do not take Windows path "c:\foo\bar" for a connection string |

213 | # Do not take Windows path "c:\foo\bar" for a connection string | |
214 | if os.path.isdir(root) or not m: |
|
214 | if os.path.isdir(root) or not m: | |
215 | conntype = b"local" |
|
215 | conntype = b"local" | |
216 | else: |
|
216 | else: | |
217 | conntype = b"rsh" |
|
217 | conntype = b"rsh" | |
218 | user, host, root = m.group(1), m.group(2), m.group(3) |
|
218 | user, host, root = m.group(1), m.group(2), m.group(3) | |
219 |
|
219 | |||
220 | if conntype != b"pserver": |
|
220 | if conntype != b"pserver": | |
221 | if conntype == b"rsh": |
|
221 | if conntype == b"rsh": | |
222 | rsh = encoding.environ.get(b"CVS_RSH") or b"ssh" |
|
222 | rsh = encoding.environ.get(b"CVS_RSH") or b"ssh" | |
223 | if user: |
|
223 | if user: | |
224 | cmd = [rsh, b'-l', user, host] + cmd |
|
224 | cmd = [rsh, b'-l', user, host] + cmd | |
225 | else: |
|
225 | else: | |
226 | cmd = [rsh, host] + cmd |
|
226 | cmd = [rsh, host] + cmd | |
227 |
|
227 | |||
228 | # popen2 does not support argument lists under Windows |
|
228 | # popen2 does not support argument lists under Windows | |
229 | cmd = [procutil.shellquote(arg) for arg in cmd] |
230 | cmd = procutil.quotecommand(b' '.join(cmd)) |

229 | cmd = b' '.join(procutil.shellquote(arg) for arg in cmd) | |

231 | self.writep, self.readp = procutil.popen2(cmd) |

230 | self.writep, self.readp = procutil.popen2(cmd) | |
232 |
|
231 | |||
233 | self.realroot = root |
|
232 | self.realroot = root | |
234 |
|
233 | |||
235 | self.writep.write(b"Root %s\n" % root) |
|
234 | self.writep.write(b"Root %s\n" % root) | |
236 | self.writep.write( |
|
235 | self.writep.write( | |
237 | b"Valid-responses ok error Valid-requests Mode" |
|
236 | b"Valid-responses ok error Valid-requests Mode" | |
238 | b" M Mbinary E Checked-in Created Updated" |
|
237 | b" M Mbinary E Checked-in Created Updated" | |
239 | b" Merged Removed\n" |
|
238 | b" Merged Removed\n" | |
240 | ) |
|
239 | ) | |
241 | self.writep.write(b"valid-requests\n") |
|
240 | self.writep.write(b"valid-requests\n") | |
242 | self.writep.flush() |
|
241 | self.writep.flush() | |
243 | r = self.readp.readline() |
|
242 | r = self.readp.readline() | |
244 | if not r.startswith(b"Valid-requests"): |
|
243 | if not r.startswith(b"Valid-requests"): | |
245 | raise error.Abort( |
|
244 | raise error.Abort( | |
246 | _( |
|
245 | _( | |
247 | b'unexpected response from CVS server ' |
|
246 | b'unexpected response from CVS server ' | |
248 | b'(expected "Valid-requests", but got %r)' |
|
247 | b'(expected "Valid-requests", but got %r)' | |
249 | ) |
|
248 | ) | |
250 | % r |
|
249 | % r | |
251 | ) |
|
250 | ) | |
252 | if b"UseUnchanged" in r: |
|
251 | if b"UseUnchanged" in r: | |
253 | self.writep.write(b"UseUnchanged\n") |
|
252 | self.writep.write(b"UseUnchanged\n") | |
254 | self.writep.flush() |
|
253 | self.writep.flush() | |
255 | self.readp.readline() |
|
254 | self.readp.readline() | |
256 |
|
255 | |||
257 | def getheads(self): |
|
256 | def getheads(self): | |
258 | self._parse() |
|
257 | self._parse() | |
259 | return self.heads |
|
258 | return self.heads | |
260 |
|
259 | |||
261 | def getfile(self, name, rev): |
|
260 | def getfile(self, name, rev): | |
262 | def chunkedread(fp, count): |
|
261 | def chunkedread(fp, count): | |
263 | # file-objects returned by socket.makefile() do not handle |
|
262 | # file-objects returned by socket.makefile() do not handle | |
264 | # large read() requests very well. |
|
263 | # large read() requests very well. | |
265 | chunksize = 65536 |
|
264 | chunksize = 65536 | |
266 | output = stringio() |
|
265 | output = stringio() | |
267 | while count > 0: |
|
266 | while count > 0: | |
268 | data = fp.read(min(count, chunksize)) |
|
267 | data = fp.read(min(count, chunksize)) | |
269 | if not data: |
|
268 | if not data: | |
270 | raise error.Abort( |
|
269 | raise error.Abort( | |
271 | _(b"%d bytes missing from remote file") % count |
|
270 | _(b"%d bytes missing from remote file") % count | |
272 | ) |
|
271 | ) | |
273 | count -= len(data) |
|
272 | count -= len(data) | |
274 | output.write(data) |
|
273 | output.write(data) | |
275 | return output.getvalue() |
|
274 | return output.getvalue() | |
276 |
|
275 | |||
277 | self._parse() |
|
276 | self._parse() | |
278 | if rev.endswith(b"(DEAD)"): |
|
277 | if rev.endswith(b"(DEAD)"): | |
279 | return None, None |
|
278 | return None, None | |
280 |
|
279 | |||
281 | args = (b"-N -P -kk -r %s --" % rev).split() |
|
280 | args = (b"-N -P -kk -r %s --" % rev).split() | |
282 | args.append(self.cvsrepo + b'/' + name) |
|
281 | args.append(self.cvsrepo + b'/' + name) | |
283 | for x in args: |
|
282 | for x in args: | |
284 | self.writep.write(b"Argument %s\n" % x) |
|
283 | self.writep.write(b"Argument %s\n" % x) | |
285 | self.writep.write(b"Directory .\n%s\nco\n" % self.realroot) |
|
284 | self.writep.write(b"Directory .\n%s\nco\n" % self.realroot) | |
286 | self.writep.flush() |
|
285 | self.writep.flush() | |
287 |
|
286 | |||
288 | data = b"" |
|
287 | data = b"" | |
289 | mode = None |
|
288 | mode = None | |
290 | while True: |
|
289 | while True: | |
291 | line = self.readp.readline() |
|
290 | line = self.readp.readline() | |
292 | if line.startswith(b"Created ") or line.startswith(b"Updated "): |
|
291 | if line.startswith(b"Created ") or line.startswith(b"Updated "): | |
293 | self.readp.readline() # path |
|
292 | self.readp.readline() # path | |
294 | self.readp.readline() # entries |
|
293 | self.readp.readline() # entries | |
295 | mode = self.readp.readline()[:-1] |
|
294 | mode = self.readp.readline()[:-1] | |
296 | count = int(self.readp.readline()[:-1]) |
|
295 | count = int(self.readp.readline()[:-1]) | |
297 | data = chunkedread(self.readp, count) |
|
296 | data = chunkedread(self.readp, count) | |
298 | elif line.startswith(b" "): |
|
297 | elif line.startswith(b" "): | |
299 | data += line[1:] |
|
298 | data += line[1:] | |
300 | elif line.startswith(b"M "): |
|
299 | elif line.startswith(b"M "): | |
301 | pass |
|
300 | pass | |
302 | elif line.startswith(b"Mbinary "): |
|
301 | elif line.startswith(b"Mbinary "): | |
303 | count = int(self.readp.readline()[:-1]) |
|
302 | count = int(self.readp.readline()[:-1]) | |
304 | data = chunkedread(self.readp, count) |
|
303 | data = chunkedread(self.readp, count) | |
305 | else: |
|
304 | else: | |
306 | if line == b"ok\n": |
|
305 | if line == b"ok\n": | |
307 | if mode is None: |
|
306 | if mode is None: | |
308 | raise error.Abort(_(b'malformed response from CVS')) |
|
307 | raise error.Abort(_(b'malformed response from CVS')) | |
309 | return (data, b"x" in mode and b"x" or b"") |
|
308 | return (data, b"x" in mode and b"x" or b"") | |
310 | elif line.startswith(b"E "): |
|
309 | elif line.startswith(b"E "): | |
311 | self.ui.warn(_(b"cvs server: %s\n") % line[2:]) |
|
310 | self.ui.warn(_(b"cvs server: %s\n") % line[2:]) | |
312 | elif line.startswith(b"Remove"): |
|
311 | elif line.startswith(b"Remove"): | |
313 | self.readp.readline() |
|
312 | self.readp.readline() | |
314 | else: |
|
313 | else: | |
315 | raise error.Abort(_(b"unknown CVS response: %s") % line) |
|
314 | raise error.Abort(_(b"unknown CVS response: %s") % line) | |
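For readers unfamiliar with the cvs client/server protocol, the loop above consumes a reply to the "co" request of roughly the following shape (an invented, illustrative transcript: file name, revision, mode and byte count are made up, and real servers interleave additional "E" progress lines):

    E cvs server: Updating .
    Created .
    /cvsroot/foo/bar.c
    /bar.c/1.1///
    u=rw,g=r,o=r
    28
    ...28 bytes of file data...
    ok

The "Created"/"Updated" branch reads the path, entries, mode and length lines in that order and then pulls the file body with chunkedread(); an "ok" arriving before any mode line is what triggers the "malformed response" abort.
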
316 |
|
315 | |||
317 | def getchanges(self, rev, full): |
|
316 | def getchanges(self, rev, full): | |
318 | if full: |
|
317 | if full: | |
319 | raise error.Abort(_(b"convert from cvs does not support --full")) |
|
318 | raise error.Abort(_(b"convert from cvs does not support --full")) | |
320 | self._parse() |
|
319 | self._parse() | |
321 | return sorted(pycompat.iteritems(self.files[rev])), {}, set() |
|
320 | return sorted(pycompat.iteritems(self.files[rev])), {}, set() | |
322 |
|
321 | |||
323 | def getcommit(self, rev): |
|
322 | def getcommit(self, rev): | |
324 | self._parse() |
|
323 | self._parse() | |
325 | return self.changeset[rev] |
|
324 | return self.changeset[rev] | |
326 |
|
325 | |||
327 | def gettags(self): |
|
326 | def gettags(self): | |
328 | self._parse() |
|
327 | self._parse() | |
329 | return self.tags |
|
328 | return self.tags | |
330 |
|
329 | |||
331 | def getchangedfiles(self, rev, i): |
|
330 | def getchangedfiles(self, rev, i): | |
332 | self._parse() |
|
331 | self._parse() | |
333 | return sorted(self.files[rev]) |
|
332 | return sorted(self.files[rev]) |
@@ -1,378 +1,378 @@
1 | # gnuarch.py - GNU Arch support for the convert extension |
|
1 | # gnuarch.py - GNU Arch support for the convert extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org> |
|
3 | # Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org> | |
4 | # and others |
|
4 | # and others | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import os |
|
10 | import os | |
11 | import shutil |
|
11 | import shutil | |
12 | import stat |
|
12 | import stat | |
13 | import tempfile |
|
13 | import tempfile | |
14 |
|
14 | |||
15 | from mercurial.i18n import _ |
|
15 | from mercurial.i18n import _ | |
16 | from mercurial import ( |
|
16 | from mercurial import ( | |
17 | encoding, |
|
17 | encoding, | |
18 | error, |
|
18 | error, | |
19 | mail, |
|
19 | mail, | |
20 | pycompat, |
|
20 | pycompat, | |
21 | util, |
|
21 | util, | |
22 | ) |
|
22 | ) | |
23 | from mercurial.utils import ( |
|
23 | from mercurial.utils import ( | |
24 | dateutil, |
|
24 | dateutil, | |
25 | procutil, |
|
25 | procutil, | |
26 | ) |
|
26 | ) | |
27 | from . import common |
|
27 | from . import common | |
28 |
|
28 | |||
29 |
|
29 | |||
30 | class gnuarch_source(common.converter_source, common.commandline): |
|
30 | class gnuarch_source(common.converter_source, common.commandline): | |
31 | class gnuarch_rev(object): |
|
31 | class gnuarch_rev(object): | |
32 | def __init__(self, rev): |
|
32 | def __init__(self, rev): | |
33 | self.rev = rev |
|
33 | self.rev = rev | |
34 | self.summary = b'' |
|
34 | self.summary = b'' | |
35 | self.date = None |
|
35 | self.date = None | |
36 | self.author = b'' |
|
36 | self.author = b'' | |
37 | self.continuationof = None |
|
37 | self.continuationof = None | |
38 | self.add_files = [] |
|
38 | self.add_files = [] | |
39 | self.mod_files = [] |
|
39 | self.mod_files = [] | |
40 | self.del_files = [] |
|
40 | self.del_files = [] | |
41 | self.ren_files = {} |
|
41 | self.ren_files = {} | |
42 | self.ren_dirs = {} |
|
42 | self.ren_dirs = {} | |
43 |
|
43 | |||
44 | def __init__(self, ui, repotype, path, revs=None): |
|
44 | def __init__(self, ui, repotype, path, revs=None): | |
45 | super(gnuarch_source, self).__init__(ui, repotype, path, revs=revs) |
|
45 | super(gnuarch_source, self).__init__(ui, repotype, path, revs=revs) | |
46 |
|
46 | |||
47 | if not os.path.exists(os.path.join(path, b'{arch}')): |
|
47 | if not os.path.exists(os.path.join(path, b'{arch}')): | |
48 | raise common.NoRepo( |
|
48 | raise common.NoRepo( | |
49 | _(b"%s does not look like a GNU Arch repository") % path |
|
49 | _(b"%s does not look like a GNU Arch repository") % path | |
50 | ) |
|
50 | ) | |
51 |
|
51 | |||
52 | # Could use checktool, but we want to check for baz or tla. |
|
52 | # Could use checktool, but we want to check for baz or tla. | |
53 | self.execmd = None |
|
53 | self.execmd = None | |
54 | if procutil.findexe(b'baz'): |
|
54 | if procutil.findexe(b'baz'): | |
55 | self.execmd = b'baz' |
|
55 | self.execmd = b'baz' | |
56 | else: |
|
56 | else: | |
57 | if procutil.findexe(b'tla'): |
|
57 | if procutil.findexe(b'tla'): | |
58 | self.execmd = b'tla' |
|
58 | self.execmd = b'tla' | |
59 | else: |
|
59 | else: | |
60 | raise error.Abort(_(b'cannot find a GNU Arch tool')) |
|
60 | raise error.Abort(_(b'cannot find a GNU Arch tool')) | |
61 |
|
61 | |||
62 | common.commandline.__init__(self, ui, self.execmd) |
|
62 | common.commandline.__init__(self, ui, self.execmd) | |
63 |
|
63 | |||
64 | self.path = os.path.realpath(path) |
|
64 | self.path = os.path.realpath(path) | |
65 | self.tmppath = None |
|
65 | self.tmppath = None | |
66 |
|
66 | |||
67 | self.treeversion = None |
|
67 | self.treeversion = None | |
68 | self.lastrev = None |
|
68 | self.lastrev = None | |
69 | self.changes = {} |
|
69 | self.changes = {} | |
70 | self.parents = {} |
|
70 | self.parents = {} | |
71 | self.tags = {} |
|
71 | self.tags = {} | |
72 | self.encoding = encoding.encoding |
|
72 | self.encoding = encoding.encoding | |
73 | self.archives = [] |
|
73 | self.archives = [] | |
74 |
|
74 | |||
75 | def before(self): |
|
75 | def before(self): | |
76 | # Get registered archives |
|
76 | # Get registered archives | |
77 | self.archives = [ |
|
77 | self.archives = [ | |
78 | i.rstrip(b'\n') for i in self.runlines0(b'archives', b'-n') |
|
78 | i.rstrip(b'\n') for i in self.runlines0(b'archives', b'-n') | |
79 | ] |
|
79 | ] | |
80 |
|
80 | |||
81 | if self.execmd == b'tla': |
|
81 | if self.execmd == b'tla': | |
82 | output = self.run0(b'tree-version', self.path) |
|
82 | output = self.run0(b'tree-version', self.path) | |
83 | else: |
|
83 | else: | |
84 | output = self.run0(b'tree-version', b'-d', self.path) |
|
84 | output = self.run0(b'tree-version', b'-d', self.path) | |
85 | self.treeversion = output.strip() |
|
85 | self.treeversion = output.strip() | |
86 |
|
86 | |||
87 | # Get name of temporary directory |
|
87 | # Get name of temporary directory | |
88 | version = self.treeversion.split(b'/') |
|
88 | version = self.treeversion.split(b'/') | |
89 | self.tmppath = os.path.join( |
|
89 | self.tmppath = os.path.join( | |
90 | pycompat.fsencode(tempfile.gettempdir()), b'hg-%s' % version[1] |
|
90 | pycompat.fsencode(tempfile.gettempdir()), b'hg-%s' % version[1] | |
91 | ) |
|
91 | ) | |
92 |
|
92 | |||
93 | # Generate parents dictionary |
|
93 | # Generate parents dictionary | |
94 | self.parents[None] = [] |
|
94 | self.parents[None] = [] | |
95 | treeversion = self.treeversion |
|
95 | treeversion = self.treeversion | |
96 | child = None |
|
96 | child = None | |
97 | while treeversion: |
|
97 | while treeversion: | |
98 | self.ui.status(_(b'analyzing tree version %s...\n') % treeversion) |
|
98 | self.ui.status(_(b'analyzing tree version %s...\n') % treeversion) | |
99 |
|
99 | |||
100 | archive = treeversion.split(b'/')[0] |
|
100 | archive = treeversion.split(b'/')[0] | |
101 | if archive not in self.archives: |
|
101 | if archive not in self.archives: | |
102 | self.ui.status( |
|
102 | self.ui.status( | |
103 | _( |
|
103 | _( | |
104 | b'tree analysis stopped because it points to ' |
|
104 | b'tree analysis stopped because it points to ' | |
105 | b'an unregistered archive %s...\n' |
|
105 | b'an unregistered archive %s...\n' | |
106 | ) |
|
106 | ) | |
107 | % archive |
|
107 | % archive | |
108 | ) |
|
108 | ) | |
109 | break |
|
109 | break | |
110 |
|
110 | |||
111 | # Get the complete list of revisions for that tree version |
|
111 | # Get the complete list of revisions for that tree version | |
112 | output, status = self.runlines( |
|
112 | output, status = self.runlines( | |
113 | b'revisions', b'-r', b'-f', treeversion |
|
113 | b'revisions', b'-r', b'-f', treeversion | |
114 | ) |
|
114 | ) | |
115 | self.checkexit( |
|
115 | self.checkexit( | |
116 | status, b'failed retrieving revisions for %s' % treeversion |
|
116 | status, b'failed retrieving revisions for %s' % treeversion | |
117 | ) |
|
117 | ) | |
118 |
|
118 | |||
119 | # No new iteration unless a revision has a continuation-of header |
|
119 | # No new iteration unless a revision has a continuation-of header | |
120 | treeversion = None |
|
120 | treeversion = None | |
121 |
|
121 | |||
122 | for l in output: |
|
122 | for l in output: | |
123 | rev = l.strip() |
|
123 | rev = l.strip() | |
124 | self.changes[rev] = self.gnuarch_rev(rev) |
|
124 | self.changes[rev] = self.gnuarch_rev(rev) | |
125 | self.parents[rev] = [] |
|
125 | self.parents[rev] = [] | |
126 |
|
126 | |||
127 | # Read author, date and summary |
|
127 | # Read author, date and summary | |
128 | catlog, status = self.run(b'cat-log', b'-d', self.path, rev) |
|
128 | catlog, status = self.run(b'cat-log', b'-d', self.path, rev) | |
129 | if status: |
|
129 | if status: | |
130 | catlog = self.run0(b'cat-archive-log', rev) |
|
130 | catlog = self.run0(b'cat-archive-log', rev) | |
131 | self._parsecatlog(catlog, rev) |
|
131 | self._parsecatlog(catlog, rev) | |
132 |
|
132 | |||
133 | # Populate the parents map |
|
133 | # Populate the parents map | |
134 | self.parents[child].append(rev) |
|
134 | self.parents[child].append(rev) | |
135 |
|
135 | |||
136 | # Keep track of the current revision as the child of the next |
|
136 | # Keep track of the current revision as the child of the next | |
137 | # revision scanned |
|
137 | # revision scanned | |
138 | child = rev |
|
138 | child = rev | |
139 |
|
139 | |||
140 | # Check if we have to follow the usual incremental history |
|
140 | # Check if we have to follow the usual incremental history | |
141 | # or if we have to 'jump' to a different treeversion given |
|
141 | # or if we have to 'jump' to a different treeversion given | |
142 | # by the continuation-of header. |
|
142 | # by the continuation-of header. | |
143 | if self.changes[rev].continuationof: |
|
143 | if self.changes[rev].continuationof: | |
144 | treeversion = b'--'.join( |
|
144 | treeversion = b'--'.join( | |
145 | self.changes[rev].continuationof.split(b'--')[:-1] |
|
145 | self.changes[rev].continuationof.split(b'--')[:-1] | |
146 | ) |
|
146 | ) | |
147 | break |
|
147 | break | |
148 |
|
148 | |||
149 | # If we reached a base-0 revision w/o any continuation-of |
|
149 | # If we reached a base-0 revision w/o any continuation-of | |
150 | # header, it means the tree history ends here. |
|
150 | # header, it means the tree history ends here. | |
151 | if rev[-6:] == b'base-0': |
|
151 | if rev[-6:] == b'base-0': | |
152 | break |
|
152 | break | |
153 |
|
153 | |||
154 | def after(self): |
|
154 | def after(self): | |
155 | self.ui.debug(b'cleaning up %s\n' % self.tmppath) |
|
155 | self.ui.debug(b'cleaning up %s\n' % self.tmppath) | |
156 | shutil.rmtree(self.tmppath, ignore_errors=True) |
|
156 | shutil.rmtree(self.tmppath, ignore_errors=True) | |
157 |
|
157 | |||
158 | def getheads(self): |
|
158 | def getheads(self): | |
159 | return self.parents[None] |
|
159 | return self.parents[None] | |
160 |
|
160 | |||
161 | def getfile(self, name, rev): |
|
161 | def getfile(self, name, rev): | |
162 | if rev != self.lastrev: |
|
162 | if rev != self.lastrev: | |
163 | raise error.Abort(_(b'internal calling inconsistency')) |
|
163 | raise error.Abort(_(b'internal calling inconsistency')) | |
164 |
|
164 | |||
165 | if not os.path.lexists(os.path.join(self.tmppath, name)): |
|
165 | if not os.path.lexists(os.path.join(self.tmppath, name)): | |
166 | return None, None |
|
166 | return None, None | |
167 |
|
167 | |||
168 | return self._getfile(name, rev) |
|
168 | return self._getfile(name, rev) | |
169 |
|
169 | |||
170 | def getchanges(self, rev, full): |
|
170 | def getchanges(self, rev, full): | |
171 | if full: |
|
171 | if full: | |
172 | raise error.Abort(_(b"convert from arch does not support --full")) |
|
172 | raise error.Abort(_(b"convert from arch does not support --full")) | |
173 | self._update(rev) |
|
173 | self._update(rev) | |
174 | changes = [] |
|
174 | changes = [] | |
175 | copies = {} |
|
175 | copies = {} | |
176 |
|
176 | |||
177 | for f in self.changes[rev].add_files: |
|
177 | for f in self.changes[rev].add_files: | |
178 | changes.append((f, rev)) |
|
178 | changes.append((f, rev)) | |
179 |
|
179 | |||
180 | for f in self.changes[rev].mod_files: |
|
180 | for f in self.changes[rev].mod_files: | |
181 | changes.append((f, rev)) |
|
181 | changes.append((f, rev)) | |
182 |
|
182 | |||
183 | for f in self.changes[rev].del_files: |
|
183 | for f in self.changes[rev].del_files: | |
184 | changes.append((f, rev)) |
|
184 | changes.append((f, rev)) | |
185 |
|
185 | |||
186 | for src in self.changes[rev].ren_files: |
|
186 | for src in self.changes[rev].ren_files: | |
187 | to = self.changes[rev].ren_files[src] |
|
187 | to = self.changes[rev].ren_files[src] | |
188 | changes.append((src, rev)) |
|
188 | changes.append((src, rev)) | |
189 | changes.append((to, rev)) |
|
189 | changes.append((to, rev)) | |
190 | copies[to] = src |
|
190 | copies[to] = src | |
191 |
|
191 | |||
192 | for src in self.changes[rev].ren_dirs: |
|
192 | for src in self.changes[rev].ren_dirs: | |
193 | to = self.changes[rev].ren_dirs[src] |
|
193 | to = self.changes[rev].ren_dirs[src] | |
194 | chgs, cps = self._rendirchanges(src, to) |
|
194 | chgs, cps = self._rendirchanges(src, to) | |
195 | changes += [(f, rev) for f in chgs] |
|
195 | changes += [(f, rev) for f in chgs] | |
196 | copies.update(cps) |
|
196 | copies.update(cps) | |
197 |
|
197 | |||
198 | self.lastrev = rev |
|
198 | self.lastrev = rev | |
199 | return sorted(set(changes)), copies, set() |
|
199 | return sorted(set(changes)), copies, set() | |
200 |
|
200 | |||
201 | def getcommit(self, rev): |
|
201 | def getcommit(self, rev): | |
202 | changes = self.changes[rev] |
|
202 | changes = self.changes[rev] | |
203 | return common.commit( |
|
203 | return common.commit( | |
204 | author=changes.author, |
|
204 | author=changes.author, | |
205 | date=changes.date, |
|
205 | date=changes.date, | |
206 | desc=changes.summary, |
|
206 | desc=changes.summary, | |
207 | parents=self.parents[rev], |
|
207 | parents=self.parents[rev], | |
208 | rev=rev, |
|
208 | rev=rev, | |
209 | ) |
|
209 | ) | |
210 |
|
210 | |||
211 | def gettags(self): |
|
211 | def gettags(self): | |
212 | return self.tags |
|
212 | return self.tags | |
213 |
|
213 | |||
214 | def _execute(self, cmd, *args, **kwargs): |
|
214 | def _execute(self, cmd, *args, **kwargs): | |
215 | cmdline = [self.execmd, cmd] |
|
215 | cmdline = [self.execmd, cmd] | |
216 | cmdline += args |
|
216 | cmdline += args | |
217 | cmdline = [procutil.shellquote(arg) for arg in cmdline] |
|
217 | cmdline = [procutil.shellquote(arg) for arg in cmdline] | |
218 | bdevnull = pycompat.bytestr(os.devnull) |
|
218 | bdevnull = pycompat.bytestr(os.devnull) | |
219 | cmdline += [b'>', bdevnull, b'2>', bdevnull] |
|
219 | cmdline += [b'>', bdevnull, b'2>', bdevnull] | |
220 | cmdline = procutil.quotecommand(b' '.join(cmdline)) |

220 | cmdline = b' '.join(cmdline) | |
221 | self.ui.debug(cmdline, b'\n') |
|
221 | self.ui.debug(cmdline, b'\n') | |
222 | return os.system(pycompat.rapply(procutil.tonativestr, cmdline)) |
|
222 | return os.system(pycompat.rapply(procutil.tonativestr, cmdline)) | |
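To make the quoting above concrete: on a POSIX system, the call self._execute(b'get', rev, self.tmppath) made by _obtainrevision() below ends up handing os.system a single string along the lines of the following (revision name and temporary path are invented; shellquote only adds quotes when an argument actually needs them):

    baz get frobozz@example.com--2008/tree--main--1.0--patch-7 /tmp/hg-tree--main--1.0 > /dev/null 2> /dev/null
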
223 |
|
223 | |||
224 | def _update(self, rev): |
|
224 | def _update(self, rev): | |
225 | self.ui.debug(b'applying revision %s...\n' % rev) |
|
225 | self.ui.debug(b'applying revision %s...\n' % rev) | |
226 | changeset, status = self.runlines(b'replay', b'-d', self.tmppath, rev) |
|
226 | changeset, status = self.runlines(b'replay', b'-d', self.tmppath, rev) | |
227 | if status: |
|
227 | if status: | |
228 | # Something went wrong while merging (baz or tla |
|
228 | # Something went wrong while merging (baz or tla | |
229 | # issue?), get latest revision and try from there |
|
229 | # issue?), get latest revision and try from there | |
230 | shutil.rmtree(self.tmppath, ignore_errors=True) |
|
230 | shutil.rmtree(self.tmppath, ignore_errors=True) | |
231 | self._obtainrevision(rev) |
|
231 | self._obtainrevision(rev) | |
232 | else: |
|
232 | else: | |
233 | old_rev = self.parents[rev][0] |
|
233 | old_rev = self.parents[rev][0] | |
234 | self.ui.debug( |
|
234 | self.ui.debug( | |
235 | b'computing changeset between %s and %s...\n' % (old_rev, rev) |
|
235 | b'computing changeset between %s and %s...\n' % (old_rev, rev) | |
236 | ) |
|
236 | ) | |
237 | self._parsechangeset(changeset, rev) |
|
237 | self._parsechangeset(changeset, rev) | |
238 |
|
238 | |||
239 | def _getfile(self, name, rev): |
|
239 | def _getfile(self, name, rev): | |
240 | mode = os.lstat(os.path.join(self.tmppath, name)).st_mode |
|
240 | mode = os.lstat(os.path.join(self.tmppath, name)).st_mode | |
241 | if stat.S_ISLNK(mode): |
|
241 | if stat.S_ISLNK(mode): | |
242 | data = util.readlink(os.path.join(self.tmppath, name)) |
|
242 | data = util.readlink(os.path.join(self.tmppath, name)) | |
243 | if mode: |
|
243 | if mode: | |
244 | mode = b'l' |
|
244 | mode = b'l' | |
245 | else: |
|
245 | else: | |
246 | mode = b'' |
|
246 | mode = b'' | |
247 | else: |
|
247 | else: | |
248 | data = util.readfile(os.path.join(self.tmppath, name)) |
|
248 | data = util.readfile(os.path.join(self.tmppath, name)) | |
249 | mode = (mode & 0o111) and b'x' or b'' |
|
249 | mode = (mode & 0o111) and b'x' or b'' | |
250 | return data, mode |
|
250 | return data, mode | |
251 |
|
251 | |||
252 | def _exclude(self, name): |
|
252 | def _exclude(self, name): | |
253 | exclude = [b'{arch}', b'.arch-ids', b'.arch-inventory'] |
|
253 | exclude = [b'{arch}', b'.arch-ids', b'.arch-inventory'] | |
254 | for exc in exclude: |
|
254 | for exc in exclude: | |
255 | if name.find(exc) != -1: |
|
255 | if name.find(exc) != -1: | |
256 | return True |
|
256 | return True | |
257 | return False |
|
257 | return False | |
258 |
|
258 | |||
259 | def _readcontents(self, path): |
|
259 | def _readcontents(self, path): | |
260 | files = [] |
|
260 | files = [] | |
261 | contents = os.listdir(path) |
|
261 | contents = os.listdir(path) | |
262 | while len(contents) > 0: |
|
262 | while len(contents) > 0: | |
263 | c = contents.pop() |
|
263 | c = contents.pop() | |
264 | p = os.path.join(path, c) |
|
264 | p = os.path.join(path, c) | |
265 | # os.walk could be used, but here we avoid internal GNU |
|
265 | # os.walk could be used, but here we avoid internal GNU | |
266 | # Arch files and directories, thus saving a lot of time. |

266 | # Arch files and directories, thus saving a lot of time. | |
267 | if not self._exclude(p): |
|
267 | if not self._exclude(p): | |
268 | if os.path.isdir(p): |
|
268 | if os.path.isdir(p): | |
269 | contents += [os.path.join(c, f) for f in os.listdir(p)] |
|
269 | contents += [os.path.join(c, f) for f in os.listdir(p)] | |
270 | else: |
|
270 | else: | |
271 | files.append(c) |
|
271 | files.append(c) | |
272 | return files |
|
272 | return files | |
273 |
|
273 | |||
274 | def _rendirchanges(self, src, dest): |
|
274 | def _rendirchanges(self, src, dest): | |
275 | changes = [] |
|
275 | changes = [] | |
276 | copies = {} |
|
276 | copies = {} | |
277 | files = self._readcontents(os.path.join(self.tmppath, dest)) |
|
277 | files = self._readcontents(os.path.join(self.tmppath, dest)) | |
278 | for f in files: |
|
278 | for f in files: | |
279 | s = os.path.join(src, f) |
|
279 | s = os.path.join(src, f) | |
280 | d = os.path.join(dest, f) |
|
280 | d = os.path.join(dest, f) | |
281 | changes.append(s) |
|
281 | changes.append(s) | |
282 | changes.append(d) |
|
282 | changes.append(d) | |
283 | copies[d] = s |
|
283 | copies[d] = s | |
284 | return changes, copies |
|
284 | return changes, copies | |
285 |
|
285 | |||
286 | def _obtainrevision(self, rev): |
|
286 | def _obtainrevision(self, rev): | |
287 | self.ui.debug(b'obtaining revision %s...\n' % rev) |
|
287 | self.ui.debug(b'obtaining revision %s...\n' % rev) | |
288 | output = self._execute(b'get', rev, self.tmppath) |
|
288 | output = self._execute(b'get', rev, self.tmppath) | |
289 | self.checkexit(output) |
|
289 | self.checkexit(output) | |
290 | self.ui.debug(b'analyzing revision %s...\n' % rev) |
|
290 | self.ui.debug(b'analyzing revision %s...\n' % rev) | |
291 | files = self._readcontents(self.tmppath) |
|
291 | files = self._readcontents(self.tmppath) | |
292 | self.changes[rev].add_files += files |
|
292 | self.changes[rev].add_files += files | |
293 |
|
293 | |||
294 | def _stripbasepath(self, path): |
|
294 | def _stripbasepath(self, path): | |
295 | if path.startswith(b'./'): |
|
295 | if path.startswith(b'./'): | |
296 | return path[2:] |
|
296 | return path[2:] | |
297 | return path |
|
297 | return path | |
298 |
|
298 | |||
299 | def _parsecatlog(self, data, rev): |
|
299 | def _parsecatlog(self, data, rev): | |
300 | try: |
|
300 | try: | |
301 | catlog = mail.parsebytes(data) |
|
301 | catlog = mail.parsebytes(data) | |
302 |
|
302 | |||
303 | # Commit date |
|
303 | # Commit date | |
304 | self.changes[rev].date = dateutil.datestr( |
|
304 | self.changes[rev].date = dateutil.datestr( | |
305 | dateutil.strdate(catlog['Standard-date'], b'%Y-%m-%d %H:%M:%S') |
|
305 | dateutil.strdate(catlog['Standard-date'], b'%Y-%m-%d %H:%M:%S') | |
306 | ) |
|
306 | ) | |
307 |
|
307 | |||
308 | # Commit author |
|
308 | # Commit author | |
309 | self.changes[rev].author = self.recode(catlog['Creator']) |
|
309 | self.changes[rev].author = self.recode(catlog['Creator']) | |
310 |
|
310 | |||
311 | # Commit description |
|
311 | # Commit description | |
312 | self.changes[rev].summary = b'\n\n'.join( |
|
312 | self.changes[rev].summary = b'\n\n'.join( | |
313 | ( |
|
313 | ( | |
314 | self.recode(catlog['Summary']), |
|
314 | self.recode(catlog['Summary']), | |
315 | self.recode(catlog.get_payload()), |
|
315 | self.recode(catlog.get_payload()), | |
316 | ) |
|
316 | ) | |
317 | ) |
|
317 | ) | |
318 | self.changes[rev].summary = self.recode(self.changes[rev].summary) |
|
318 | self.changes[rev].summary = self.recode(self.changes[rev].summary) | |
319 |
|
319 | |||
320 | # Commit revision origin when dealing with a branch or tag |
|
320 | # Commit revision origin when dealing with a branch or tag | |
321 | if 'Continuation-of' in catlog: |
|
321 | if 'Continuation-of' in catlog: | |
322 | self.changes[rev].continuationof = self.recode( |
|
322 | self.changes[rev].continuationof = self.recode( | |
323 | catlog['Continuation-of'] |
|
323 | catlog['Continuation-of'] | |
324 | ) |
|
324 | ) | |
325 | except Exception: |
|
325 | except Exception: | |
326 | raise error.Abort(_(b'could not parse cat-log of %s') % rev) |
|
326 | raise error.Abort(_(b'could not parse cat-log of %s') % rev) | |
327 |
|
327 | |||
328 | def _parsechangeset(self, data, rev): |
|
328 | def _parsechangeset(self, data, rev): | |
329 | for l in data: |
|
329 | for l in data: | |
330 | l = l.strip() |
|
330 | l = l.strip() | |
331 | # Added file (ignore added directory) |
|
331 | # Added file (ignore added directory) | |
332 | if l.startswith(b'A') and not l.startswith(b'A/'): |
|
332 | if l.startswith(b'A') and not l.startswith(b'A/'): | |
333 | file = self._stripbasepath(l[1:].strip()) |
|
333 | file = self._stripbasepath(l[1:].strip()) | |
334 | if not self._exclude(file): |
|
334 | if not self._exclude(file): | |
335 | self.changes[rev].add_files.append(file) |
|
335 | self.changes[rev].add_files.append(file) | |
336 | # Deleted file (ignore deleted directory) |
|
336 | # Deleted file (ignore deleted directory) | |
337 | elif l.startswith(b'D') and not l.startswith(b'D/'): |
|
337 | elif l.startswith(b'D') and not l.startswith(b'D/'): | |
338 | file = self._stripbasepath(l[1:].strip()) |
|
338 | file = self._stripbasepath(l[1:].strip()) | |
339 | if not self._exclude(file): |
|
339 | if not self._exclude(file): | |
340 | self.changes[rev].del_files.append(file) |
|
340 | self.changes[rev].del_files.append(file) | |
341 | # Modified binary file |
|
341 | # Modified binary file | |
342 | elif l.startswith(b'Mb'): |
|
342 | elif l.startswith(b'Mb'): | |
343 | file = self._stripbasepath(l[2:].strip()) |
|
343 | file = self._stripbasepath(l[2:].strip()) | |
344 | if not self._exclude(file): |
|
344 | if not self._exclude(file): | |
345 | self.changes[rev].mod_files.append(file) |
|
345 | self.changes[rev].mod_files.append(file) | |
346 | # Modified link |
|
346 | # Modified link | |
347 | elif l.startswith(b'M->'): |
|
347 | elif l.startswith(b'M->'): | |
348 | file = self._stripbasepath(l[3:].strip()) |
|
348 | file = self._stripbasepath(l[3:].strip()) | |
349 | if not self._exclude(file): |
|
349 | if not self._exclude(file): | |
350 | self.changes[rev].mod_files.append(file) |
|
350 | self.changes[rev].mod_files.append(file) | |
351 | # Modified file |
|
351 | # Modified file | |
352 | elif l.startswith(b'M'): |
|
352 | elif l.startswith(b'M'): | |
353 | file = self._stripbasepath(l[1:].strip()) |
|
353 | file = self._stripbasepath(l[1:].strip()) | |
354 | if not self._exclude(file): |
|
354 | if not self._exclude(file): | |
355 | self.changes[rev].mod_files.append(file) |
|
355 | self.changes[rev].mod_files.append(file) | |
356 | # Renamed file (or link) |
|
356 | # Renamed file (or link) | |
357 | elif l.startswith(b'=>'): |
|
357 | elif l.startswith(b'=>'): | |
358 | files = l[2:].strip().split(b' ') |
|
358 | files = l[2:].strip().split(b' ') | |
359 | if len(files) == 1: |
|
359 | if len(files) == 1: | |
360 | files = l[2:].strip().split(b'\t') |
|
360 | files = l[2:].strip().split(b'\t') | |
361 | src = self._stripbasepath(files[0]) |
|
361 | src = self._stripbasepath(files[0]) | |
362 | dst = self._stripbasepath(files[1]) |
|
362 | dst = self._stripbasepath(files[1]) | |
363 | if not self._exclude(src) and not self._exclude(dst): |
|
363 | if not self._exclude(src) and not self._exclude(dst): | |
364 | self.changes[rev].ren_files[src] = dst |
|
364 | self.changes[rev].ren_files[src] = dst | |
365 | # Conversion from file to link or from link to file (modified) |
|
365 | # Conversion from file to link or from link to file (modified) | |
366 | elif l.startswith(b'ch'): |
|
366 | elif l.startswith(b'ch'): | |
367 | file = self._stripbasepath(l[2:].strip()) |
|
367 | file = self._stripbasepath(l[2:].strip()) | |
368 | if not self._exclude(file): |
|
368 | if not self._exclude(file): | |
369 | self.changes[rev].mod_files.append(file) |
|
369 | self.changes[rev].mod_files.append(file) | |
370 | # Renamed directory |
|
370 | # Renamed directory | |
371 | elif l.startswith(b'/>'): |
|
371 | elif l.startswith(b'/>'): | |
372 | dirs = l[2:].strip().split(b' ') |
|
372 | dirs = l[2:].strip().split(b' ') | |
373 | if len(dirs) == 1: |
|
373 | if len(dirs) == 1: | |
374 | dirs = l[2:].strip().split(b'\t') |
|
374 | dirs = l[2:].strip().split(b'\t') | |
375 | src = self._stripbasepath(dirs[0]) |
|
375 | src = self._stripbasepath(dirs[0]) | |
376 | dst = self._stripbasepath(dirs[1]) |
|
376 | dst = self._stripbasepath(dirs[1]) | |
377 | if not self._exclude(src) and not self._exclude(dst): |
|
377 | if not self._exclude(src) and not self._exclude(dst): | |
378 | self.changes[rev].ren_dirs[src] = dst |
|
378 | self.changes[rev].ren_dirs[src] = dst |
@@ -1,1565 +1,1565 @@
1 | # Subversion 1.4/1.5 Python API backend |
|
1 | # Subversion 1.4/1.5 Python API backend | |
2 | # |
|
2 | # | |
3 | # Copyright(C) 2007 Daniel Holth et al |
|
3 | # Copyright(C) 2007 Daniel Holth et al | |
4 | from __future__ import absolute_import |
|
4 | from __future__ import absolute_import | |
5 |
|
5 | |||
6 | import os |
|
6 | import os | |
7 | import re |
|
7 | import re | |
8 | import xml.dom.minidom |
|
8 | import xml.dom.minidom | |
9 |
|
9 | |||
10 | from mercurial.i18n import _ |
|
10 | from mercurial.i18n import _ | |
11 | from mercurial.pycompat import open |
|
11 | from mercurial.pycompat import open | |
12 | from mercurial import ( |
|
12 | from mercurial import ( | |
13 | encoding, |
|
13 | encoding, | |
14 | error, |
|
14 | error, | |
15 | pycompat, |
|
15 | pycompat, | |
16 | util, |
|
16 | util, | |
17 | vfs as vfsmod, |
|
17 | vfs as vfsmod, | |
18 | ) |
|
18 | ) | |
19 | from mercurial.utils import ( |
|
19 | from mercurial.utils import ( | |
20 | dateutil, |
|
20 | dateutil, | |
21 | procutil, |
|
21 | procutil, | |
22 | stringutil, |
|
22 | stringutil, | |
23 | ) |
|
23 | ) | |
24 |
|
24 | |||
25 | from . import common |
|
25 | from . import common | |
26 |
|
26 | |||
27 | pickle = util.pickle |
|
27 | pickle = util.pickle | |
28 | stringio = util.stringio |
|
28 | stringio = util.stringio | |
29 | propertycache = util.propertycache |
|
29 | propertycache = util.propertycache | |
30 | urlerr = util.urlerr |
|
30 | urlerr = util.urlerr | |
31 | urlreq = util.urlreq |
|
31 | urlreq = util.urlreq | |
32 |
|
32 | |||
33 | commandline = common.commandline |
|
33 | commandline = common.commandline | |
34 | commit = common.commit |
|
34 | commit = common.commit | |
35 | converter_sink = common.converter_sink |
|
35 | converter_sink = common.converter_sink | |
36 | converter_source = common.converter_source |
|
36 | converter_source = common.converter_source | |
37 | decodeargs = common.decodeargs |
|
37 | decodeargs = common.decodeargs | |
38 | encodeargs = common.encodeargs |
|
38 | encodeargs = common.encodeargs | |
39 | makedatetimestamp = common.makedatetimestamp |
|
39 | makedatetimestamp = common.makedatetimestamp | |
40 | mapfile = common.mapfile |
|
40 | mapfile = common.mapfile | |
41 | MissingTool = common.MissingTool |
|
41 | MissingTool = common.MissingTool | |
42 | NoRepo = common.NoRepo |
|
42 | NoRepo = common.NoRepo | |
43 |
|
43 | |||
44 | # Subversion stuff. Works best with very recent Python SVN bindings |
|
44 | # Subversion stuff. Works best with very recent Python SVN bindings | |
45 | # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing |
|
45 | # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing | |
46 | # these bindings. |
|
46 | # these bindings. | |
47 |
|
47 | |||
48 | try: |
|
48 | try: | |
49 | import svn |
|
49 | import svn | |
50 | import svn.client |
|
50 | import svn.client | |
51 | import svn.core |
|
51 | import svn.core | |
52 | import svn.ra |
|
52 | import svn.ra | |
53 | import svn.delta |
|
53 | import svn.delta | |
54 | from . import transport |
|
54 | from . import transport | |
55 | import warnings |
|
55 | import warnings | |
56 |
|
56 | |||
57 | warnings.filterwarnings( |
|
57 | warnings.filterwarnings( | |
58 | b'ignore', module=b'svn.core', category=DeprecationWarning |
|
58 | b'ignore', module=b'svn.core', category=DeprecationWarning | |
59 | ) |
|
59 | ) | |
60 | svn.core.SubversionException # trigger import to catch error |
|
60 | svn.core.SubversionException # trigger import to catch error | |
61 |
|
61 | |||
62 | except ImportError: |
|
62 | except ImportError: | |
63 | svn = None |
|
63 | svn = None | |
64 |
|
64 | |||
65 |
|
65 | |||
66 | class SvnPathNotFound(Exception): |
|
66 | class SvnPathNotFound(Exception): | |
67 | pass |
|
67 | pass | |
68 |
|
68 | |||
69 |
|
69 | |||
70 | def revsplit(rev): |
|
70 | def revsplit(rev): | |
71 | """Parse a revision string and return (uuid, path, revnum). |
|
71 | """Parse a revision string and return (uuid, path, revnum). | |
72 | >>> revsplit(b'svn:a2147622-4a9f-4db4-a8d3-13562ff547b2' |
|
72 | >>> revsplit(b'svn:a2147622-4a9f-4db4-a8d3-13562ff547b2' | |
73 | ... b'/proj%20B/mytrunk/mytrunk@1') |
|
73 | ... b'/proj%20B/mytrunk/mytrunk@1') | |
74 | ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1) |
|
74 | ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1) | |
75 | >>> revsplit(b'svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1') |
|
75 | >>> revsplit(b'svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1') | |
76 | ('', '', 1) |
|
76 | ('', '', 1) | |
77 | >>> revsplit(b'@7') |
|
77 | >>> revsplit(b'@7') | |
78 | ('', '', 7) |
|
78 | ('', '', 7) | |
79 | >>> revsplit(b'7') |
|
79 | >>> revsplit(b'7') | |
80 | ('', '', 0) |
|
80 | ('', '', 0) | |
81 | >>> revsplit(b'bad') |
|
81 | >>> revsplit(b'bad') | |
82 | ('', '', 0) |
|
82 | ('', '', 0) | |
83 | """ |
|
83 | """ | |
84 | parts = rev.rsplit(b'@', 1) |
|
84 | parts = rev.rsplit(b'@', 1) | |
85 | revnum = 0 |
|
85 | revnum = 0 | |
86 | if len(parts) > 1: |
|
86 | if len(parts) > 1: | |
87 | revnum = int(parts[1]) |
|
87 | revnum = int(parts[1]) | |
88 | parts = parts[0].split(b'/', 1) |
|
88 | parts = parts[0].split(b'/', 1) | |
89 | uuid = b'' |
|
89 | uuid = b'' | |
90 | mod = b'' |
|
90 | mod = b'' | |
91 | if len(parts) > 1 and parts[0].startswith(b'svn:'): |
|
91 | if len(parts) > 1 and parts[0].startswith(b'svn:'): | |
92 | uuid = parts[0][4:] |
|
92 | uuid = parts[0][4:] | |
93 | mod = b'/' + parts[1] |
|
93 | mod = b'/' + parts[1] | |
94 | return uuid, mod, revnum |
|
94 | return uuid, mod, revnum | |
95 |
|
95 | |||
96 |
|
96 | |||
97 | def quote(s): |
|
97 | def quote(s): | |
98 | # As of svn 1.7, many svn calls expect "canonical" paths. In |
|
98 | # As of svn 1.7, many svn calls expect "canonical" paths. In | |
99 | # theory, we should call svn.core.*canonicalize() on all paths |
|
99 | # theory, we should call svn.core.*canonicalize() on all paths | |
100 | # before passing them to the API. Instead, we assume the base url |
|
100 | # before passing them to the API. Instead, we assume the base url | |
101 | # is canonical and copy the behaviour of svn URL encoding function |
|
101 | # is canonical and copy the behaviour of svn URL encoding function | |
102 | # so we can extend it safely with new components. The "safe" |
|
102 | # so we can extend it safely with new components. The "safe" | |
103 | # characters were taken from the "svn_uri__char_validity" table in |
|
103 | # characters were taken from the "svn_uri__char_validity" table in | |
104 | # libsvn_subr/path.c. |
|
104 | # libsvn_subr/path.c. | |
105 | return urlreq.quote(s, b"!$&'()*+,-./:=@_~") |
|
105 | return urlreq.quote(s, b"!$&'()*+,-./:=@_~") | |
106 |
|
106 | |||
107 |
|
107 | |||
108 | def geturl(path):
109 |     try:
110 |         return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
111 |     except svn.core.SubversionException:
112 |         # svn.client.url_from_path() fails with local repositories
113 |         pass
114 |     if os.path.isdir(path):
115 |         path = os.path.normpath(os.path.abspath(path))
116 |         if pycompat.iswindows:
117 |             path = b'/' + util.normpath(path)
118 |         # Module URL is later compared with the repository URL returned
119 |         # by svn API, which is UTF-8.
120 |         path = encoding.tolocal(path)
121 |         path = b'file://%s' % quote(path)
122 |     return svn.core.svn_path_canonicalize(path)
123 |
124 |
125 | def optrev(number):
126 |     optrev = svn.core.svn_opt_revision_t()
127 |     optrev.kind = svn.core.svn_opt_revision_number
128 |     optrev.value.number = number
129 |     return optrev
130 |
131 |
132 | class changedpath(object):
133 |     def __init__(self, p):
134 |         self.copyfrom_path = p.copyfrom_path
135 |         self.copyfrom_rev = p.copyfrom_rev
136 |         self.action = p.action
137 |
138 |
139 | def get_log_child(
140 |     fp,
141 |     url,
142 |     paths,
143 |     start,
144 |     end,
145 |     limit=0,
146 |     discover_changed_paths=True,
147 |     strict_node_history=False,
148 | ):
149 |     protocol = -1
150 |
151 |     def receiver(orig_paths, revnum, author, date, message, pool):
152 |         paths = {}
153 |         if orig_paths is not None:
154 |             for k, v in pycompat.iteritems(orig_paths):
155 |                 paths[k] = changedpath(v)
156 |         pickle.dump((paths, revnum, author, date, message), fp, protocol)
157 |
158 |     try:
159 |         # Use an ra of our own so that our parent can consume
160 |         # our results without confusing the server.
161 |         t = transport.SvnRaTransport(url=url)
162 |         svn.ra.get_log(
163 |             t.ra,
164 |             paths,
165 |             start,
166 |             end,
167 |             limit,
168 |             discover_changed_paths,
169 |             strict_node_history,
170 |             receiver,
171 |         )
172 |     except IOError:
173 |         # Caller may interrupt the iteration
174 |         pickle.dump(None, fp, protocol)
175 |     except Exception as inst:
176 |         pickle.dump(stringutil.forcebytestr(inst), fp, protocol)
177 |     else:
178 |         pickle.dump(None, fp, protocol)
179 |     fp.flush()
180 |     # With large history, cleanup process goes crazy and suddenly
181 |     # consumes *huge* amount of memory. The output file being closed,
182 |     # there is no need for clean termination.
183 |     os._exit(0)
184 |
185 |
186 | def debugsvnlog(ui, **opts):
187 |     """Fetch SVN log in a subprocess and channel them back to parent to
188 |     avoid memory collection issues.
189 |     """
190 |     if svn is None:
191 |         raise error.Abort(
192 |             _(b'debugsvnlog could not load Subversion python bindings')
193 |         )
194 |
195 |     args = decodeargs(ui.fin.read())
196 |     get_log_child(ui.fout, *args)
197 |
198 |
199 | class logstream(object):
200 |     """Interruptible revision log iterator."""
201 |
202 |     def __init__(self, stdout):
203 |         self._stdout = stdout
204 |
205 |     def __iter__(self):
206 |         while True:
207 |             try:
208 |                 entry = pickle.load(self._stdout)
209 |             except EOFError:
210 |                 raise error.Abort(
211 |                     _(
212 |                         b'Mercurial failed to run itself, check'
213 |                         b' hg executable is in PATH'
214 |                     )
215 |                 )
216 |             try:
217 |                 orig_paths, revnum, author, date, message = entry
218 |             except (TypeError, ValueError):
219 |                 if entry is None:
220 |                     break
221 |                 raise error.Abort(_(b"log stream exception '%s'") % entry)
222 |             yield entry
223 |
224 |     def close(self):
225 |         if self._stdout:
226 |             self._stdout.close()
227 |             self._stdout = None
228 |
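debugsvnlog() and logstream form a producer/consumer pair: the child process pickles one log entry at a time into its output and finishes with a None sentinel (or an error string), while the parent unpickles entries until it sees the sentinel. A stripped-down sketch of that pattern with made-up payloads, outside of any svn machinery:

    import pickle
    import subprocess
    import sys

    def child():
        # Producer: stream results one pickle at a time, then a None sentinel,
        # mirroring get_log_child() above.
        out = sys.stdout.buffer
        for entry in ({b'rev': 1}, {b'rev': 2}):
            pickle.dump(entry, out)
        pickle.dump(None, out)  # normal end of stream
        out.flush()

    def parent():
        # Consumer: unpickle until the sentinel, mirroring logstream.__iter__().
        proc = subprocess.Popen(
            [sys.executable, __file__, '--child'], stdout=subprocess.PIPE
        )
        while True:
            entry = pickle.load(proc.stdout)
            if entry is None:
                break
            print(entry)
        proc.wait()

    if __name__ == '__main__':
        child() if '--child' in sys.argv else parent()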
229 |
230 | class directlogstream(list):
231 |     """Direct revision log iterator.
232 |     This can be used for debugging and development but it will probably leak
233 |     memory and is not suitable for real conversions."""
234 |
235 |     def __init__(
236 |         self,
237 |         url,
238 |         paths,
239 |         start,
240 |         end,
241 |         limit=0,
242 |         discover_changed_paths=True,
243 |         strict_node_history=False,
244 |     ):
245 |         def receiver(orig_paths, revnum, author, date, message, pool):
246 |             paths = {}
247 |             if orig_paths is not None:
248 |                 for k, v in pycompat.iteritems(orig_paths):
249 |                     paths[k] = changedpath(v)
250 |             self.append((paths, revnum, author, date, message))
251 |
252 |         # Use an ra of our own so that our parent can consume
253 |         # our results without confusing the server.
254 |         t = transport.SvnRaTransport(url=url)
255 |         svn.ra.get_log(
256 |             t.ra,
257 |             paths,
258 |             start,
259 |             end,
260 |             limit,
261 |             discover_changed_paths,
262 |             strict_node_history,
263 |             receiver,
264 |         )
265 |
266 |     def close(self):
267 |         pass
268 |
269 |
270 | # Check to see if the given path is a local Subversion repo. Verify this by
271 | # looking for several svn-specific files and directories in the given
272 | # directory.
273 | def filecheck(ui, path, proto):
274 |     for x in (b'locks', b'hooks', b'format', b'db'):
275 |         if not os.path.exists(os.path.join(path, x)):
276 |             return False
277 |     return True
278 |
279 |
280 | # Check to see if a given path is the root of an svn repo over http. We verify
281 | # this by requesting a version-controlled URL we know can't exist and looking
282 | # for the svn-specific "not found" XML.
283 | def httpcheck(ui, path, proto):
284 |     try:
285 |         opener = urlreq.buildopener()
286 |         rsp = opener.open(b'%s://%s/!svn/ver/0/.svn' % (proto, path), b'rb')
287 |         data = rsp.read()
288 |     except urlerr.httperror as inst:
289 |         if inst.code != 404:
290 |             # Except for 404 we cannot know for sure this is not an svn repo
291 |             ui.warn(
292 |                 _(
293 |                     b'svn: cannot probe remote repository, assume it could '
294 |                     b'be a subversion repository. Use --source-type if you '
295 |                     b'know better.\n'
296 |                 )
297 |             )
298 |             return True
299 |         data = inst.fp.read()
300 |     except Exception:
301 |         # Could be urlerr.urlerror if the URL is invalid or anything else.
302 |         return False
303 |     return b'<m:human-readable errcode="160013">' in data
304 |
305 |
306 | protomap = {
307 |     b'http': httpcheck,
308 |     b'https': httpcheck,
309 |     b'file': filecheck,
310 | }
311 |
312 |
313 | def issvnurl(ui, url):
314 |     try:
315 |         proto, path = url.split(b'://', 1)
316 |         if proto == b'file':
317 |             if (
318 |                 pycompat.iswindows
319 |                 and path[:1] == b'/'
320 |                 and path[1:2].isalpha()
321 |                 and path[2:6].lower() == b'%3a/'
322 |             ):
323 |                 path = path[:2] + b':/' + path[6:]
324 |             path = urlreq.url2pathname(path)
325 |     except ValueError:
326 |         proto = b'file'
327 |         path = os.path.abspath(url)
328 |     if proto == b'file':
329 |         path = util.pconvert(path)
330 |     check = protomap.get(proto, lambda *args: False)
331 |     while b'/' in path:
332 |         if check(ui, path, proto):
333 |             return True
334 |         path = path.rsplit(b'/', 1)[0]
335 |     return False
336 |
337 |
338 | # SVN conversion code stolen from bzr-svn and tailor
339 | #
340 | # Subversion looks like a versioned filesystem, branches structures
341 | # are defined by conventions and not enforced by the tool. First,
342 | # we define the potential branches (modules) as "trunk" and "branches"
343 | # children directories. Revisions are then identified by their
344 | # module and revision number (and a repository identifier).
345 | #
346 | # The revision graph is really a tree (or a forest). By default, a
347 | # revision parent is the previous revision in the same module. If the
348 | # module directory is copied/moved from another module then the
349 | # revision is the module root and its parent the source revision in
350 | # the parent module. A revision has at most one parent.
351 | #
352 | class svn_source(converter_source):
353 |     def __init__(self, ui, repotype, url, revs=None):
354 |         super(svn_source, self).__init__(ui, repotype, url, revs=revs)
355 |
356 |         if not (
357 |             url.startswith(b'svn://')
358 |             or url.startswith(b'svn+ssh://')
359 |             or (
360 |                 os.path.exists(url)
361 |                 and os.path.exists(os.path.join(url, b'.svn'))
362 |             )
363 |             or issvnurl(ui, url)
364 |         ):
365 |             raise NoRepo(
366 |                 _(b"%s does not look like a Subversion repository") % url
367 |             )
368 |         if svn is None:
369 |             raise MissingTool(_(b'could not load Subversion python bindings'))
370 |
371 |         try:
372 |             version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
373 |             if version < (1, 4):
374 |                 raise MissingTool(
375 |                     _(
376 |                         b'Subversion python bindings %d.%d found, '
377 |                         b'1.4 or later required'
378 |                     )
379 |                     % version
380 |                 )
381 |         except AttributeError:
382 |             raise MissingTool(
383 |                 _(
384 |                     b'Subversion python bindings are too old, 1.4 '
385 |                     b'or later required'
386 |                 )
387 |             )
388 |
389 |         self.lastrevs = {}
390 |
391 |         latest = None
392 |         try:
393 |             # Support file://path@rev syntax. Useful e.g. to convert
394 |             # deleted branches.
395 |             at = url.rfind(b'@')
396 |             if at >= 0:
397 |                 latest = int(url[at + 1 :])
398 |                 url = url[:at]
399 |         except ValueError:
400 |             pass
401 |         self.url = geturl(url)
402 |         self.encoding = b'UTF-8'  # Subversion is always nominal UTF-8
403 |         try:
404 |             self.transport = transport.SvnRaTransport(url=self.url)
405 |             self.ra = self.transport.ra
406 |             self.ctx = self.transport.client
407 |             self.baseurl = svn.ra.get_repos_root(self.ra)
408 |             # Module is either empty or a repository path starting with
409 |             # a slash and not ending with a slash.
410 |             self.module = urlreq.unquote(self.url[len(self.baseurl) :])
411 |             self.prevmodule = None
412 |             self.rootmodule = self.module
413 |             self.commits = {}
414 |             self.paths = {}
415 |             self.uuid = svn.ra.get_uuid(self.ra)
416 |         except svn.core.SubversionException:
417 |             ui.traceback()
418 |             svnversion = b'%d.%d.%d' % (
419 |                 svn.core.SVN_VER_MAJOR,
420 |                 svn.core.SVN_VER_MINOR,
421 |                 svn.core.SVN_VER_MICRO,
422 |             )
423 |             raise NoRepo(
424 |                 _(
425 |                     b"%s does not look like a Subversion repository "
426 |                     b"to libsvn version %s"
427 |                 )
428 |                 % (self.url, svnversion)
429 |             )
430 |
431 |         if revs:
432 |             if len(revs) > 1:
433 |                 raise error.Abort(
434 |                     _(
435 |                         b'subversion source does not support '
436 |                         b'specifying multiple revisions'
437 |                     )
438 |                 )
439 |             try:
440 |                 latest = int(revs[0])
441 |             except ValueError:
442 |                 raise error.Abort(
443 |                     _(b'svn: revision %s is not an integer') % revs[0]
444 |                 )
445 |
446 |         trunkcfg = self.ui.config(b'convert', b'svn.trunk')
447 |         if trunkcfg is None:
448 |             trunkcfg = b'trunk'
449 |         self.trunkname = trunkcfg.strip(b'/')
450 |         self.startrev = self.ui.config(b'convert', b'svn.startrev')
451 |         try:
452 |             self.startrev = int(self.startrev)
453 |             if self.startrev < 0:
454 |                 self.startrev = 0
455 |         except ValueError:
456 |             raise error.Abort(
457 |                 _(b'svn: start revision %s is not an integer') % self.startrev
458 |             )
459 |
460 |         try:
461 |             self.head = self.latest(self.module, latest)
462 |         except SvnPathNotFound:
463 |             self.head = None
464 |         if not self.head:
465 |             raise error.Abort(
466 |                 _(b'no revision found in module %s') % self.module
467 |             )
468 |         self.last_changed = self.revnum(self.head)
469 |
470 |         self._changescache = (None, None)
471 |
472 |         if os.path.exists(os.path.join(url, b'.svn/entries')):
473 |             self.wc = url
474 |         else:
475 |             self.wc = None
476 |         self.convertfp = None
477 |
478 |     def setrevmap(self, revmap):
479 |         lastrevs = {}
480 |         for revid in revmap:
481 |             uuid, module, revnum = revsplit(revid)
482 |             lastrevnum = lastrevs.setdefault(module, revnum)
483 |             if revnum > lastrevnum:
484 |                 lastrevs[module] = revnum
485 |         self.lastrevs = lastrevs
486 |
487 |     def exists(self, path, optrev):
488 |         try:
489 |             svn.client.ls(
490 |                 self.url.rstrip(b'/') + b'/' + quote(path),
491 |                 optrev,
492 |                 False,
493 |                 self.ctx,
494 |             )
495 |             return True
496 |         except svn.core.SubversionException:
497 |             return False
498 |
499 |     def getheads(self):
500 |         def isdir(path, revnum):
501 |             kind = self._checkpath(path, revnum)
502 |             return kind == svn.core.svn_node_dir
503 |
504 |         def getcfgpath(name, rev):
505 |             cfgpath = self.ui.config(b'convert', b'svn.' + name)
506 |             if cfgpath is not None and cfgpath.strip() == b'':
507 |                 return None
508 |             path = (cfgpath or name).strip(b'/')
509 |             if not self.exists(path, rev):
510 |                 if self.module.endswith(path) and name == b'trunk':
511 |                     # we are converting from inside this directory
512 |                     return None
513 |                 if cfgpath:
514 |                     raise error.Abort(
515 |                         _(b'expected %s to be at %r, but not found')
516 |                         % (name, path)
517 |                     )
518 |                 return None
519 |             self.ui.note(_(b'found %s at %r\n') % (name, path))
520 |             return path
521 |
522 |         rev = optrev(self.last_changed)
523 |         oldmodule = b''
524 |         trunk = getcfgpath(b'trunk', rev)
525 |         self.tags = getcfgpath(b'tags', rev)
526 |         branches = getcfgpath(b'branches', rev)
527 |
528 |         # If the project has a trunk or branches, we will extract heads
529 |         # from them. We keep the project root otherwise.
530 |         if trunk:
531 |             oldmodule = self.module or b''
532 |             self.module += b'/' + trunk
533 |             self.head = self.latest(self.module, self.last_changed)
534 |             if not self.head:
535 |                 raise error.Abort(
536 |                     _(b'no revision found in module %s') % self.module
537 |                 )
538 |
539 |         # First head in the list is the module's head
540 |         self.heads = [self.head]
541 |         if self.tags is not None:
542 |             self.tags = b'%s/%s' % (oldmodule, (self.tags or b'tags'))
543 |
544 |         # Check if branches bring a few more heads to the list
545 |         if branches:
546 |             rpath = self.url.strip(b'/')
547 |             branchnames = svn.client.ls(
548 |                 rpath + b'/' + quote(branches), rev, False, self.ctx
549 |             )
550 |             for branch in sorted(branchnames):
551 |                 module = b'%s/%s/%s' % (oldmodule, branches, branch)
552 |                 if not isdir(module, self.last_changed):
553 |                     continue
554 |                 brevid = self.latest(module, self.last_changed)
555 |                 if not brevid:
556 |                     self.ui.note(_(b'ignoring empty branch %s\n') % branch)
557 |                     continue
558 |                 self.ui.note(
559 |                     _(b'found branch %s at %d\n')
560 |                     % (branch, self.revnum(brevid))
561 |                 )
562 |                 self.heads.append(brevid)
563 |
564 |         if self.startrev and self.heads:
565 |             if len(self.heads) > 1:
566 |                 raise error.Abort(
567 |                     _(
568 |                         b'svn: start revision is not supported '
569 |                         b'with more than one branch'
570 |                     )
571 |                 )
572 |             revnum = self.revnum(self.heads[0])
573 |             if revnum < self.startrev:
574 |                 raise error.Abort(
575 |                     _(b'svn: no revision found after start revision %d')
576 |                     % self.startrev
577 |                 )
578 |
579 |         return self.heads
580 |
581 |     def _getchanges(self, rev, full):
582 |         (paths, parents) = self.paths[rev]
583 |         copies = {}
584 |         if parents:
585 |             files, self.removed, copies = self.expandpaths(rev, paths, parents)
586 |         if full or not parents:
587 |             # Perform a full checkout on roots
588 |             uuid, module, revnum = revsplit(rev)
589 |             entries = svn.client.ls(
590 |                 self.baseurl + quote(module), optrev(revnum), True, self.ctx
591 |             )
592 |             files = [
593 |                 n
594 |                 for n, e in pycompat.iteritems(entries)
595 |                 if e.kind == svn.core.svn_node_file
596 |             ]
597 |             self.removed = set()
598 |
599 |         files.sort()
600 |         files = zip(files, [rev] * len(files))
601 |         return (files, copies)
602 |
603 |     def getchanges(self, rev, full):
604 |         # reuse cache from getchangedfiles
605 |         if self._changescache[0] == rev and not full:
606 |             (files, copies) = self._changescache[1]
607 |         else:
608 |             (files, copies) = self._getchanges(rev, full)
609 |             # caller caches the result, so free it here to release memory
610 |             del self.paths[rev]
611 |         return (files, copies, set())
612 |
613 |     def getchangedfiles(self, rev, i):
614 |         # called from filemap - cache computed values for reuse in getchanges
615 |         (files, copies) = self._getchanges(rev, False)
616 |         self._changescache = (rev, (files, copies))
617 |         return [f[0] for f in files]
618 |
619 |     def getcommit(self, rev):
620 |         if rev not in self.commits:
621 |             uuid, module, revnum = revsplit(rev)
622 |             self.module = module
623 |             self.reparent(module)
624 |             # We assume that:
625 |             # - requests for revisions after "stop" come from the
626 |             #   revision graph backward traversal. Cache all of them
627 |             #   down to stop, they will be used eventually.
628 |             # - requests for revisions before "stop" come to get
629 |             #   isolated branches parents. Just fetch what is needed.
630 |             stop = self.lastrevs.get(module, 0)
631 |             if revnum < stop:
632 |                 stop = revnum + 1
633 |             self._fetch_revisions(revnum, stop)
634 |             if rev not in self.commits:
635 |                 raise error.Abort(_(b'svn: revision %s not found') % revnum)
636 |         revcommit = self.commits[rev]
637 |         # caller caches the result, so free it here to release memory
638 |         del self.commits[rev]
639 |         return revcommit
640 |
641 |     def checkrevformat(self, revstr, mapname=b'splicemap'):
642 |         """ fails if revision format does not match the correct format"""
643 |         if not re.match(
644 |             r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
645 |             r'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
646 |             r'{12,12}(.*)@[0-9]+$',
647 |             revstr,
648 |         ):
649 |             raise error.Abort(
650 |                 _(b'%s entry %s is not a valid revision identifier')
651 |                 % (mapname, revstr)
652 |             )
653 |
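For the record, a splicemap entry that passes the check above has the same shape as what revid() below produces: b'svn:' + repository UUID + module path + b'@' + revision number. A self-contained restatement of the pattern with a made-up UUID (the regex is copied from the method above, written as a bytes pattern so it can match byte strings):

    import re

    sample = b'svn:7a4a3597-f27e-4a67-9a66-f22cf0e8e88a/branches/stable@1234'
    pattern = (
        br'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
        br'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
        br'{12,12}(.*)@[0-9]+$'
    )
    assert re.match(pattern, sample) is not None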
654 |     def numcommits(self):
655 |         return int(self.head.rsplit(b'@', 1)[1]) - self.startrev
656 |
657 |     def gettags(self):
658 |         tags = {}
659 |         if self.tags is None:
660 |             return tags
661 |
662 |         # svn tags are just a convention, project branches left in a
663 |         # 'tags' directory. There is no other relationship than
664 |         # ancestry, which is expensive to discover and makes them hard
665 |         # to update incrementally. Worse, past revisions may be
666 |         # referenced by tags far away in the future, requiring a deep
667 |         # history traversal on every calculation. Current code
668 |         # performs a single backward traversal, tracking moves within
669 |         # the tags directory (tag renaming) and recording a new tag
670 |         # everytime a project is copied from outside the tags
671 |         # directory. It also lists deleted tags, this behaviour may
672 |         # change in the future.
673 |         pendings = []
674 |         tagspath = self.tags
675 |         start = svn.ra.get_latest_revnum(self.ra)
676 |         stream = self._getlog([self.tags], start, self.startrev)
677 |         try:
678 |             for entry in stream:
679 |                 origpaths, revnum, author, date, message = entry
680 |                 if not origpaths:
681 |                     origpaths = []
682 |                 copies = [
683 |                     (e.copyfrom_path, e.copyfrom_rev, p)
684 |                     for p, e in pycompat.iteritems(origpaths)
685 |                     if e.copyfrom_path
686 |                 ]
687 |                 # Apply moves/copies from more specific to general
688 |                 copies.sort(reverse=True)
689 |
690 |                 srctagspath = tagspath
691 |                 if copies and copies[-1][2] == tagspath:
692 |                     # Track tags directory moves
693 |                     srctagspath = copies.pop()[0]
694 |
695 |                 for source, sourcerev, dest in copies:
696 |                     if not dest.startswith(tagspath + b'/'):
697 |                         continue
698 |                     for tag in pendings:
699 |                         if tag[0].startswith(dest):
700 |                             tagpath = source + tag[0][len(dest) :]
701 |                             tag[:2] = [tagpath, sourcerev]
702 |                             break
703 |                     else:
704 |                         pendings.append([source, sourcerev, dest])
705 |
706 |                 # Filter out tags with children coming from different
707 |                 # parts of the repository like:
708 |                 # /tags/tag.1 (from /trunk:10)
709 |                 # /tags/tag.1/foo (from /branches/foo:12)
710 |                 # Here/tags/tag.1 discarded as well as its children.
711 |                 # It happens with tools like cvs2svn. Such tags cannot
712 |                 # be represented in mercurial.
713 |                 addeds = {
714 |                     p: e.copyfrom_path
715 |                     for p, e in pycompat.iteritems(origpaths)
716 |                     if e.action == b'A' and e.copyfrom_path
717 |                 }
718 |                 badroots = set()
719 |                 for destroot in addeds:
720 |                     for source, sourcerev, dest in pendings:
721 |                         if not dest.startswith(
722 |                             destroot + b'/'
723 |                         ) or source.startswith(addeds[destroot] + b'/'):
724 |                             continue
725 |                         badroots.add(destroot)
726 |                         break
727 |
728 |                 for badroot in badroots:
729 |                     pendings = [
730 |                         p
731 |                         for p in pendings
732 |                         if p[2] != badroot
733 |                         and not p[2].startswith(badroot + b'/')
734 |                     ]
735 |
736 |                 # Tell tag renamings from tag creations
737 |                 renamings = []
738 |                 for source, sourcerev, dest in pendings:
739 |                     tagname = dest.split(b'/')[-1]
740 |                     if source.startswith(srctagspath):
741 |                         renamings.append([source, sourcerev, tagname])
742 |                         continue
743 |                     if tagname in tags:
744 |                         # Keep the latest tag value
745 |                         continue
746 |                     # From revision may be fake, get one with changes
747 |                     try:
748 |                         tagid = self.latest(source, sourcerev)
749 |                         if tagid and tagname not in tags:
750 |                             tags[tagname] = tagid
751 |                     except SvnPathNotFound:
752 |                         # It happens when we are following directories
753 |                         # we assumed were copied with their parents
754 |                         # but were really created in the tag
755 |                         # directory.
756 |                         pass
757 |                 pendings = renamings
758 |                 tagspath = srctagspath
759 |         finally:
760 |             stream.close()
761 |         return tags
762 |
763 |     def converted(self, rev, destrev):
764 |         if not self.wc:
765 |             return
766 |         if self.convertfp is None:
767 |             self.convertfp = open(
768 |                 os.path.join(self.wc, b'.svn', b'hg-shamap'), b'ab'
769 |             )
770 |         self.convertfp.write(
771 |             util.tonativeeol(b'%s %d\n' % (destrev, self.revnum(rev)))
772 |         )
773 |         self.convertfp.flush()
774 |
775 |     def revid(self, revnum, module=None):
776 |         return b'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
777 |
778 |     def revnum(self, rev):
779 |         return int(rev.split(b'@')[-1])
780 |
781 |     def latest(self, path, stop=None):
782 |         """Find the latest revid affecting path, up to stop revision
783 |         number. If stop is None, default to repository latest
784 |         revision. It may return a revision in a different module,
785 |         since a branch may be moved without a change being
786 |         reported. Return None if computed module does not belong to
787 |         rootmodule subtree.
788 |         """
789 |
790 |         def findchanges(path, start, stop=None):
791 |             stream = self._getlog([path], start, stop or 1)
792 |             try:
793 |                 for entry in stream:
794 |                     paths, revnum, author, date, message = entry
795 |                     if stop is None and paths:
796 |                         # We do not know the latest changed revision,
797 |                         # keep the first one with changed paths.
798 |                         break
799 |                     if revnum <= stop:
800 |                         break
801 |
802 |                 for p in paths:
803 |                     if not path.startswith(p) or not paths[p].copyfrom_path:
804 |                         continue
805 |                     newpath = paths[p].copyfrom_path + path[len(p) :]
806 |                     self.ui.debug(
807 |                         b"branch renamed from %s to %s at %d\n"
808 |                         % (path, newpath, revnum)
809 |                     )
810 |                     path = newpath
811 |                     break
812 |                 if not paths:
813 |                     revnum = None
814 |                 return revnum, path
815 |             finally:
816 |                 stream.close()
817 |
818 |         if not path.startswith(self.rootmodule):
819 |             # Requests on foreign branches may be forbidden at server level
820 |             self.ui.debug(b'ignoring foreign branch %r\n' % path)
821 |             return None
822 |
823 |         if stop is None:
824 |             stop = svn.ra.get_latest_revnum(self.ra)
825 |         try:
826 |             prevmodule = self.reparent(b'')
827 |             dirent = svn.ra.stat(self.ra, path.strip(b'/'), stop)
828 |             self.reparent(prevmodule)
829 |         except svn.core.SubversionException:
830 |             dirent = None
831 |         if not dirent:
832 |             raise SvnPathNotFound(
833 |                 _(b'%s not found up to revision %d') % (path, stop)
834 |             )
835 |
836 |         # stat() gives us the previous revision on this line of
837 |         # development, but it might be in *another module*. Fetch the
838 |         # log and detect renames down to the latest revision.
839 |         revnum, realpath = findchanges(path, stop, dirent.created_rev)
840 |         if revnum is None:
841 |             # Tools like svnsync can create empty revision, when
842 |             # synchronizing only a subtree for instance. These empty
843 |             # revisions created_rev still have their original values
844 |             # despite all changes having disappeared and can be
845 |             # returned by ra.stat(), at least when stating the root
846 |             # module. In that case, do not trust created_rev and scan
847 |             # the whole history.
848 |             revnum, realpath = findchanges(path, stop)
849 |             if revnum is None:
850 |                 self.ui.debug(b'ignoring empty branch %r\n' % realpath)
851 |                 return None
852 |
853 |         if not realpath.startswith(self.rootmodule):
854 |             self.ui.debug(b'ignoring foreign branch %r\n' % realpath)
855 |             return None
856 |         return self.revid(revnum, realpath)
857 |
858 |     def reparent(self, module):
859 |         """Reparent the svn transport and return the previous parent."""
860 |         if self.prevmodule == module:
861 |             return module
862 |         svnurl = self.baseurl + quote(module)
863 |         prevmodule = self.prevmodule
864 |         if prevmodule is None:
865 |             prevmodule = b''
866 |         self.ui.debug(b"reparent to %s\n" % svnurl)
867 |         svn.ra.reparent(self.ra, svnurl)
868 |         self.prevmodule = module
869 |         return prevmodule
870 |
871 |     def expandpaths(self, rev, paths, parents):
872 |         changed, removed = set(), set()
873 |         copies = {}
874 |
875 |         new_module, revnum = revsplit(rev)[1:]
876 |         if new_module != self.module:
877 |             self.module = new_module
878 |             self.reparent(self.module)
879 |
880 |         progress = self.ui.makeprogress(
881 |             _(b'scanning paths'), unit=_(b'paths'), total=len(paths)
882 |         )
883 |         for i, (path, ent) in enumerate(paths):
884 |             progress.update(i, item=path)
885 |             entrypath = self.getrelpath(path)
886 |
887 |             kind = self._checkpath(entrypath, revnum)
888 |             if kind == svn.core.svn_node_file:
889 |                 changed.add(self.recode(entrypath))
890 |                 if not ent.copyfrom_path or not parents:
891 |                     continue
892 |                 # Copy sources not in parent revisions cannot be
893 |                 # represented, ignore their origin for now
894 |                 pmodule, prevnum = revsplit(parents[0])[1:]
895 |                 if ent.copyfrom_rev < prevnum:
896 |                     continue
897 |                 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
898 |                 if not copyfrom_path:
899 |                     continue
900 |                 self.ui.debug(
901 |                     b"copied to %s from %s@%s\n"
902 |                     % (entrypath, copyfrom_path, ent.copyfrom_rev)
903 |                 )
904 |                 copies[self.recode(entrypath)] = self.recode(copyfrom_path)
905 |             elif kind == 0:  # gone, but had better be a deleted *file*
906 |                 self.ui.debug(b"gone from %s\n" % ent.copyfrom_rev)
907 |                 pmodule, prevnum = revsplit(parents[0])[1:]
908 |                 parentpath = pmodule + b"/" + entrypath
909 |                 fromkind = self._checkpath(entrypath, prevnum, pmodule)
910 |
911 |                 if fromkind == svn.core.svn_node_file:
912 |                     removed.add(self.recode(entrypath))
913 |                 elif fromkind == svn.core.svn_node_dir:
914 |                     oroot = parentpath.strip(b'/')
915 |                     nroot = path.strip(b'/')
916 |                     children = self._iterfiles(oroot, prevnum)
917 |                     for childpath in children:
918 |                         childpath = childpath.replace(oroot, nroot)
919 |                         childpath = self.getrelpath(b"/" + childpath, pmodule)
920 |                         if childpath:
921 |                             removed.add(self.recode(childpath))
922 |                 else:
923 |                     self.ui.debug(
924 |                         b'unknown path in revision %d: %s\n' % (revnum, path)
925 |                     )
926 |             elif kind == svn.core.svn_node_dir:
927 |                 if ent.action == b'M':
928 |                     # If the directory just had a prop change,
929 |                     # then we shouldn't need to look for its children.
930 |                     continue
931 |                 if ent.action == b'R' and parents:
932 |                     # If a directory is replacing a file, mark the previous
933 |                     # file as deleted
934 |                     pmodule, prevnum = revsplit(parents[0])[1:]
935 |                     pkind = self._checkpath(entrypath, prevnum, pmodule)
936 |                     if pkind == svn.core.svn_node_file:
937 |                         removed.add(self.recode(entrypath))
938 |                     elif pkind == svn.core.svn_node_dir:
939 |                         # We do not know what files were kept or removed,
940 |                         # mark them all as changed.
941 |                         for childpath in self._iterfiles(pmodule, prevnum):
942 |                             childpath = self.getrelpath(b"/" + childpath)
943 |                             if childpath:
944 |                                 changed.add(self.recode(childpath))
945 |
946 |                 for childpath in self._iterfiles(path, revnum):
947 |                     childpath = self.getrelpath(b"/" + childpath)
948 |                     if childpath:
949 |                         changed.add(self.recode(childpath))
950 |
951 |                 # Handle directory copies
952 |                 if not ent.copyfrom_path or not parents:
953 |                     continue
954 |                 # Copy sources not in parent revisions cannot be
955 |                 # represented, ignore their origin for now
956 |                 pmodule, prevnum = revsplit(parents[0])[1:]
957 |                 if ent.copyfrom_rev < prevnum:
958 |                     continue
959 |                 copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
960 |                 if not copyfrompath:
961 |                     continue
962 |                 self.ui.debug(
963 |                     b"mark %s came from %s:%d\n"
964 |                     % (path, copyfrompath, ent.copyfrom_rev)
965 |                 )
966 |                 children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
967 |                 for childpath in children:
968 |                     childpath = self.getrelpath(b"/" + childpath, pmodule)
969 |                     if not childpath:
970 |                         continue
971 |                     copytopath = path + childpath[len(copyfrompath) :]
972 |                     copytopath = self.getrelpath(copytopath)
|
972 | copytopath = self.getrelpath(copytopath) | |
973 | copies[self.recode(copytopath)] = self.recode(childpath) |
|
973 | copies[self.recode(copytopath)] = self.recode(childpath) | |
974 |
|
974 | |||
975 | progress.complete() |
|
975 | progress.complete() | |
976 | changed.update(removed) |
|
976 | changed.update(removed) | |
977 | return (list(changed), removed, copies) |
|
977 | return (list(changed), removed, copies) | |
978 |
|
978 | |||
979 | def _fetch_revisions(self, from_revnum, to_revnum): |
|
979 | def _fetch_revisions(self, from_revnum, to_revnum): | |
980 | if from_revnum < to_revnum: |
|
980 | if from_revnum < to_revnum: | |
981 | from_revnum, to_revnum = to_revnum, from_revnum |
|
981 | from_revnum, to_revnum = to_revnum, from_revnum | |
982 |
|
982 | |||
983 | self.child_cset = None |
|
983 | self.child_cset = None | |
984 |
|
984 | |||
985 | def parselogentry(orig_paths, revnum, author, date, message): |
|
985 | def parselogentry(orig_paths, revnum, author, date, message): | |
986 | """Return the parsed commit object or None, and True if |
|
986 | """Return the parsed commit object or None, and True if | |
987 | the revision is a branch root. |
|
987 | the revision is a branch root. | |
988 | """ |
|
988 | """ | |
989 | self.ui.debug( |
|
989 | self.ui.debug( | |
990 | b"parsing revision %d (%d changes)\n" |
|
990 | b"parsing revision %d (%d changes)\n" | |
991 | % (revnum, len(orig_paths)) |
|
991 | % (revnum, len(orig_paths)) | |
992 | ) |
|
992 | ) | |
993 |
|
993 | |||
994 | branched = False |
|
994 | branched = False | |
995 | rev = self.revid(revnum) |
|
995 | rev = self.revid(revnum) | |
996 | # branch log might return entries for a parent we already have |
|
996 | # branch log might return entries for a parent we already have | |
997 |
|
997 | |||
998 | if rev in self.commits or revnum < to_revnum: |
|
998 | if rev in self.commits or revnum < to_revnum: | |
999 | return None, branched |
|
999 | return None, branched | |
1000 |
|
1000 | |||
1001 | parents = [] |
|
1001 | parents = [] | |
1002 | # check whether this revision is the start of a branch or part |
|
1002 | # check whether this revision is the start of a branch or part | |
1003 | # of a branch renaming |
|
1003 | # of a branch renaming | |
1004 | orig_paths = sorted(pycompat.iteritems(orig_paths)) |
|
1004 | orig_paths = sorted(pycompat.iteritems(orig_paths)) | |
1005 | root_paths = [ |
|
1005 | root_paths = [ | |
1006 | (p, e) for p, e in orig_paths if self.module.startswith(p) |
|
1006 | (p, e) for p, e in orig_paths if self.module.startswith(p) | |
1007 | ] |
|
1007 | ] | |
1008 | if root_paths: |
|
1008 | if root_paths: | |
1009 | path, ent = root_paths[-1] |
|
1009 | path, ent = root_paths[-1] | |
1010 | if ent.copyfrom_path: |
|
1010 | if ent.copyfrom_path: | |
1011 | branched = True |
|
1011 | branched = True | |
1012 | newpath = ent.copyfrom_path + self.module[len(path) :] |
|
1012 | newpath = ent.copyfrom_path + self.module[len(path) :] | |
1013 | # ent.copyfrom_rev may not be the actual last revision |
|
1013 | # ent.copyfrom_rev may not be the actual last revision | |
1014 | previd = self.latest(newpath, ent.copyfrom_rev) |
|
1014 | previd = self.latest(newpath, ent.copyfrom_rev) | |
1015 | if previd is not None: |
|
1015 | if previd is not None: | |
1016 | prevmodule, prevnum = revsplit(previd)[1:] |
|
1016 | prevmodule, prevnum = revsplit(previd)[1:] | |
1017 | if prevnum >= self.startrev: |
|
1017 | if prevnum >= self.startrev: | |
1018 | parents = [previd] |
|
1018 | parents = [previd] | |
1019 | self.ui.note( |
|
1019 | self.ui.note( | |
1020 | _(b'found parent of branch %s at %d: %s\n') |
|
1020 | _(b'found parent of branch %s at %d: %s\n') | |
1021 | % (self.module, prevnum, prevmodule) |
|
1021 | % (self.module, prevnum, prevmodule) | |
1022 | ) |
|
1022 | ) | |
1023 | else: |
|
1023 | else: | |
1024 | self.ui.debug(b"no copyfrom path, don't know what to do.\n") |
|
1024 | self.ui.debug(b"no copyfrom path, don't know what to do.\n") | |
1025 |
|
1025 | |||
1026 | paths = [] |
|
1026 | paths = [] | |
1027 | # filter out unrelated paths |
|
1027 | # filter out unrelated paths | |
1028 | for path, ent in orig_paths: |
|
1028 | for path, ent in orig_paths: | |
1029 | if self.getrelpath(path) is None: |
|
1029 | if self.getrelpath(path) is None: | |
1030 | continue |
|
1030 | continue | |
1031 | paths.append((path, ent)) |
|
1031 | paths.append((path, ent)) | |
1032 |
|
1032 | |||
1033 | # Example SVN datetime. Includes microseconds. |
|
1033 | # Example SVN datetime. Includes microseconds. | |
1034 | # ISO-8601 conformant |
|
1034 | # ISO-8601 conformant | |
1035 | # '2007-01-04T17:35:00.902377Z' |
|
1035 | # '2007-01-04T17:35:00.902377Z' | |
1036 | date = dateutil.parsedate( |
|
1036 | date = dateutil.parsedate( | |
1037 | date[:19] + b" UTC", [b"%Y-%m-%dT%H:%M:%S"] |
|
1037 | date[:19] + b" UTC", [b"%Y-%m-%dT%H:%M:%S"] | |
1038 | ) |
|
1038 | ) | |
1039 | if self.ui.configbool(b'convert', b'localtimezone'): |
|
1039 | if self.ui.configbool(b'convert', b'localtimezone'): | |
1040 | date = makedatetimestamp(date[0]) |
|
1040 | date = makedatetimestamp(date[0]) | |
1041 |
|
1041 | |||
1042 | if message: |
|
1042 | if message: | |
1043 | log = self.recode(message) |
|
1043 | log = self.recode(message) | |
1044 | else: |
|
1044 | else: | |
1045 | log = b'' |
|
1045 | log = b'' | |
1046 |
|
1046 | |||
1047 | if author: |
|
1047 | if author: | |
1048 | author = self.recode(author) |
|
1048 | author = self.recode(author) | |
1049 | else: |
|
1049 | else: | |
1050 | author = b'' |
|
1050 | author = b'' | |
1051 |
|
1051 | |||
1052 | try: |
|
1052 | try: | |
1053 | branch = self.module.split(b"/")[-1] |
|
1053 | branch = self.module.split(b"/")[-1] | |
1054 | if branch == self.trunkname: |
|
1054 | if branch == self.trunkname: | |
1055 | branch = None |
|
1055 | branch = None | |
1056 | except IndexError: |
|
1056 | except IndexError: | |
1057 | branch = None |
|
1057 | branch = None | |
1058 |
|
1058 | |||
1059 | cset = commit( |
|
1059 | cset = commit( | |
1060 | author=author, |
|
1060 | author=author, | |
1061 | date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'), |
|
1061 | date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'), | |
1062 | desc=log, |
|
1062 | desc=log, | |
1063 | parents=parents, |
|
1063 | parents=parents, | |
1064 | branch=branch, |
|
1064 | branch=branch, | |
1065 | rev=rev, |
|
1065 | rev=rev, | |
1066 | ) |
|
1066 | ) | |
1067 |
|
1067 | |||
1068 | self.commits[rev] = cset |
|
1068 | self.commits[rev] = cset | |
1069 | # The parents list is *shared* among self.paths and the |
|
1069 | # The parents list is *shared* among self.paths and the | |
1070 | # commit object. Both will be updated below. |
|
1070 | # commit object. Both will be updated below. | |
1071 | self.paths[rev] = (paths, cset.parents) |
|
1071 | self.paths[rev] = (paths, cset.parents) | |
1072 | if self.child_cset and not self.child_cset.parents: |
|
1072 | if self.child_cset and not self.child_cset.parents: | |
1073 | self.child_cset.parents[:] = [rev] |
|
1073 | self.child_cset.parents[:] = [rev] | |
1074 | self.child_cset = cset |
|
1074 | self.child_cset = cset | |
1075 | return cset, branched |
|
1075 | return cset, branched | |
1076 |
|
1076 | |||
1077 | self.ui.note( |
|
1077 | self.ui.note( | |
1078 | _(b'fetching revision log for "%s" from %d to %d\n') |
|
1078 | _(b'fetching revision log for "%s" from %d to %d\n') | |
1079 | % (self.module, from_revnum, to_revnum) |
|
1079 | % (self.module, from_revnum, to_revnum) | |
1080 | ) |
|
1080 | ) | |
1081 |
|
1081 | |||
1082 | try: |
|
1082 | try: | |
1083 | firstcset = None |
|
1083 | firstcset = None | |
1084 | lastonbranch = False |
|
1084 | lastonbranch = False | |
1085 | stream = self._getlog([self.module], from_revnum, to_revnum) |
|
1085 | stream = self._getlog([self.module], from_revnum, to_revnum) | |
1086 | try: |
|
1086 | try: | |
1087 | for entry in stream: |
|
1087 | for entry in stream: | |
1088 | paths, revnum, author, date, message = entry |
|
1088 | paths, revnum, author, date, message = entry | |
1089 | if revnum < self.startrev: |
|
1089 | if revnum < self.startrev: | |
1090 | lastonbranch = True |
|
1090 | lastonbranch = True | |
1091 | break |
|
1091 | break | |
1092 | if not paths: |
|
1092 | if not paths: | |
1093 | self.ui.debug(b'revision %d has no entries\n' % revnum) |
|
1093 | self.ui.debug(b'revision %d has no entries\n' % revnum) | |
1094 | # If we ever leave the loop on an empty |
|
1094 | # If we ever leave the loop on an empty | |
1095 | # revision, do not try to get a parent branch |
|
1095 | # revision, do not try to get a parent branch | |
1096 | lastonbranch = lastonbranch or revnum == 0 |
|
1096 | lastonbranch = lastonbranch or revnum == 0 | |
1097 | continue |
|
1097 | continue | |
1098 | cset, lastonbranch = parselogentry( |
|
1098 | cset, lastonbranch = parselogentry( | |
1099 | paths, revnum, author, date, message |
|
1099 | paths, revnum, author, date, message | |
1100 | ) |
|
1100 | ) | |
1101 | if cset: |
|
1101 | if cset: | |
1102 | firstcset = cset |
|
1102 | firstcset = cset | |
1103 | if lastonbranch: |
|
1103 | if lastonbranch: | |
1104 | break |
|
1104 | break | |
1105 | finally: |
|
1105 | finally: | |
1106 | stream.close() |
|
1106 | stream.close() | |
1107 |
|
1107 | |||
1108 | if not lastonbranch and firstcset and not firstcset.parents: |
|
1108 | if not lastonbranch and firstcset and not firstcset.parents: | |
1109 | # The first revision of the sequence (the last fetched one) |
|
1109 | # The first revision of the sequence (the last fetched one) | |
1110 | # has invalid parents if not a branch root. Find the parent |
|
1110 | # has invalid parents if not a branch root. Find the parent | |
1111 | # revision now, if any. |
|
1111 | # revision now, if any. | |
1112 | try: |
|
1112 | try: | |
1113 | firstrevnum = self.revnum(firstcset.rev) |
|
1113 | firstrevnum = self.revnum(firstcset.rev) | |
1114 | if firstrevnum > 1: |
|
1114 | if firstrevnum > 1: | |
1115 | latest = self.latest(self.module, firstrevnum - 1) |
|
1115 | latest = self.latest(self.module, firstrevnum - 1) | |
1116 | if latest: |
|
1116 | if latest: | |
1117 | firstcset.parents.append(latest) |
|
1117 | firstcset.parents.append(latest) | |
1118 | except SvnPathNotFound: |
|
1118 | except SvnPathNotFound: | |
1119 | pass |
|
1119 | pass | |
1120 | except svn.core.SubversionException as xxx_todo_changeme: |
|
1120 | except svn.core.SubversionException as xxx_todo_changeme: | |
1121 | (inst, num) = xxx_todo_changeme.args |
|
1121 | (inst, num) = xxx_todo_changeme.args | |
1122 | if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION: |
|
1122 | if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION: | |
1123 | raise error.Abort( |
|
1123 | raise error.Abort( | |
1124 | _(b'svn: branch has no revision %s') % to_revnum |
|
1124 | _(b'svn: branch has no revision %s') % to_revnum | |
1125 | ) |
|
1125 | ) | |
1126 | raise |
|
1126 | raise | |
1127 |
|
1127 | |||
1128 | def getfile(self, file, rev): |
|
1128 | def getfile(self, file, rev): | |
1129 | # TODO: ra.get_file transmits the whole file instead of diffs. |
|
1129 | # TODO: ra.get_file transmits the whole file instead of diffs. | |
1130 | if file in self.removed: |
|
1130 | if file in self.removed: | |
1131 | return None, None |
|
1131 | return None, None | |
1132 | try: |
|
1132 | try: | |
1133 | new_module, revnum = revsplit(rev)[1:] |
|
1133 | new_module, revnum = revsplit(rev)[1:] | |
1134 | if self.module != new_module: |
|
1134 | if self.module != new_module: | |
1135 | self.module = new_module |
|
1135 | self.module = new_module | |
1136 | self.reparent(self.module) |
|
1136 | self.reparent(self.module) | |
1137 | io = stringio() |
|
1137 | io = stringio() | |
1138 | info = svn.ra.get_file(self.ra, file, revnum, io) |
|
1138 | info = svn.ra.get_file(self.ra, file, revnum, io) | |
1139 | data = io.getvalue() |
|
1139 | data = io.getvalue() | |
1140 | # ra.get_file() seems to keep a reference on the input buffer |
|
1140 | # ra.get_file() seems to keep a reference on the input buffer | |
1141 | # preventing collection. Release it explicitly. |
|
1141 | # preventing collection. Release it explicitly. | |
1142 | io.close() |
|
1142 | io.close() | |
1143 | if isinstance(info, list): |
|
1143 | if isinstance(info, list): | |
1144 | info = info[-1] |
|
1144 | info = info[-1] | |
1145 | mode = (b"svn:executable" in info) and b'x' or b'' |
|
1145 | mode = (b"svn:executable" in info) and b'x' or b'' | |
1146 | mode = (b"svn:special" in info) and b'l' or mode |
|
1146 | mode = (b"svn:special" in info) and b'l' or mode | |
1147 | except svn.core.SubversionException as e: |
|
1147 | except svn.core.SubversionException as e: | |
1148 | notfound = ( |
|
1148 | notfound = ( | |
1149 | svn.core.SVN_ERR_FS_NOT_FOUND, |
|
1149 | svn.core.SVN_ERR_FS_NOT_FOUND, | |
1150 | svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND, |
|
1150 | svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND, | |
1151 | ) |
|
1151 | ) | |
1152 | if e.apr_err in notfound: # File not found |
|
1152 | if e.apr_err in notfound: # File not found | |
1153 | return None, None |
|
1153 | return None, None | |
1154 | raise |
|
1154 | raise | |
1155 | if mode == b'l': |
|
1155 | if mode == b'l': | |
1156 | link_prefix = b"link " |
|
1156 | link_prefix = b"link " | |
1157 | if data.startswith(link_prefix): |
|
1157 | if data.startswith(link_prefix): | |
1158 | data = data[len(link_prefix) :] |
|
1158 | data = data[len(link_prefix) :] | |
1159 | return data, mode |
|
1159 | return data, mode | |
1160 |
|
1160 | |||
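
getfile() above maps Subversion file properties onto Mercurial flags: svn:executable becomes 'x' and svn:special becomes 'l'. For the latter, Subversion stores the symlink target as the file body prefixed with "link ", which is why that prefix is stripped before the data is returned. A toy illustration of the same decoding, using an invented sample body:

# Invented sample body for an svn:special file; Subversion represents a
# symlink as ordinary file content prefixed with "link ".
raw = b"link ../lib/libfoo.so"
link_prefix = b"link "
target = raw[len(link_prefix):] if raw.startswith(link_prefix) else raw
assert target == b"../lib/libfoo.so"
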
1161 | def _iterfiles(self, path, revnum): |
|
1161 | def _iterfiles(self, path, revnum): | |
1162 | """Enumerate all files in path at revnum, recursively.""" |
|
1162 | """Enumerate all files in path at revnum, recursively.""" | |
1163 | path = path.strip(b'/') |
|
1163 | path = path.strip(b'/') | |
1164 | pool = svn.core.Pool() |
|
1164 | pool = svn.core.Pool() | |
1165 | rpath = b'/'.join([self.baseurl, quote(path)]).strip(b'/') |
|
1165 | rpath = b'/'.join([self.baseurl, quote(path)]).strip(b'/') | |
1166 | entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool) |
|
1166 | entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool) | |
1167 | if path: |
|
1167 | if path: | |
1168 | path += b'/' |
|
1168 | path += b'/' | |
1169 | return ( |
|
1169 | return ( | |
1170 | (path + p) |
|
1170 | (path + p) | |
1171 | for p, e in pycompat.iteritems(entries) |
|
1171 | for p, e in pycompat.iteritems(entries) | |
1172 | if e.kind == svn.core.svn_node_file |
|
1172 | if e.kind == svn.core.svn_node_file | |
1173 | ) |
|
1173 | ) | |
1174 |
|
1174 | |||
1175 | def getrelpath(self, path, module=None): |
|
1175 | def getrelpath(self, path, module=None): | |
1176 | if module is None: |
|
1176 | if module is None: | |
1177 | module = self.module |
|
1177 | module = self.module | |
1178 | # Given the repository url of this wc, say |
|
1178 | # Given the repository url of this wc, say | |
1179 | # "http://server/plone/CMFPlone/branches/Plone-2_0-branch" |
|
1179 | # "http://server/plone/CMFPlone/branches/Plone-2_0-branch" | |
1180 | # extract the "entry" portion (a relative path) from what |
|
1180 | # extract the "entry" portion (a relative path) from what | |
1181 | # svn log --xml says, i.e. |
|
1181 | # svn log --xml says, i.e. | |
1182 | # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py" |
|
1182 | # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py" | |
1183 | # that is to say "tests/PloneTestCase.py" |
|
1183 | # that is to say "tests/PloneTestCase.py" | |
1184 | if path.startswith(module): |
|
1184 | if path.startswith(module): | |
1185 | relative = path.rstrip(b'/')[len(module) :] |
|
1185 | relative = path.rstrip(b'/')[len(module) :] | |
1186 | if relative.startswith(b'/'): |
|
1186 | if relative.startswith(b'/'): | |
1187 | return relative[1:] |
|
1187 | return relative[1:] | |
1188 | elif relative == b'': |
|
1188 | elif relative == b'': | |
1189 | return relative |
|
1189 | return relative | |
1190 |
|
1190 | |||
1191 | # The path is outside our tracked tree... |
|
1191 | # The path is outside our tracked tree... | |
1192 | self.ui.debug(b'%r is not under %r, ignoring\n' % (path, module)) |
|
1192 | self.ui.debug(b'%r is not under %r, ignoring\n' % (path, module)) | |
1193 | return None |
|
1193 | return None | |
1194 |
|
1194 | |||
1195 | def _checkpath(self, path, revnum, module=None): |
|
1195 | def _checkpath(self, path, revnum, module=None): | |
1196 | if module is not None: |
|
1196 | if module is not None: | |
1197 | prevmodule = self.reparent(b'') |
|
1197 | prevmodule = self.reparent(b'') | |
1198 | path = module + b'/' + path |
|
1198 | path = module + b'/' + path | |
1199 | try: |
|
1199 | try: | |
1200 | # ra.check_path does not like leading slashes very much, it leads |
|
1200 | # ra.check_path does not like leading slashes very much, it leads | |
1201 | # to PROPFIND subversion errors |
|
1201 | # to PROPFIND subversion errors | |
1202 | return svn.ra.check_path(self.ra, path.strip(b'/'), revnum) |
|
1202 | return svn.ra.check_path(self.ra, path.strip(b'/'), revnum) | |
1203 | finally: |
|
1203 | finally: | |
1204 | if module is not None: |
|
1204 | if module is not None: | |
1205 | self.reparent(prevmodule) |
|
1205 | self.reparent(prevmodule) | |
1206 |
|
1206 | |||
1207 | def _getlog( |
|
1207 | def _getlog( | |
1208 | self, |
|
1208 | self, | |
1209 | paths, |
|
1209 | paths, | |
1210 | start, |
|
1210 | start, | |
1211 | end, |
|
1211 | end, | |
1212 | limit=0, |
|
1212 | limit=0, | |
1213 | discover_changed_paths=True, |
|
1213 | discover_changed_paths=True, | |
1214 | strict_node_history=False, |
|
1214 | strict_node_history=False, | |
1215 | ): |
|
1215 | ): | |
1216 | # Normalize path names, svn >= 1.5 only wants paths relative to |
|
1216 | # Normalize path names, svn >= 1.5 only wants paths relative to | |
1217 | # supplied URL |
|
1217 | # supplied URL | |
1218 | relpaths = [] |
|
1218 | relpaths = [] | |
1219 | for p in paths: |
|
1219 | for p in paths: | |
1220 | if not p.startswith(b'/'): |
|
1220 | if not p.startswith(b'/'): | |
1221 | p = self.module + b'/' + p |
|
1221 | p = self.module + b'/' + p | |
1222 | relpaths.append(p.strip(b'/')) |
|
1222 | relpaths.append(p.strip(b'/')) | |
1223 | args = [ |
|
1223 | args = [ | |
1224 | self.baseurl, |
|
1224 | self.baseurl, | |
1225 | relpaths, |
|
1225 | relpaths, | |
1226 | start, |
|
1226 | start, | |
1227 | end, |
|
1227 | end, | |
1228 | limit, |
|
1228 | limit, | |
1229 | discover_changed_paths, |
|
1229 | discover_changed_paths, | |
1230 | strict_node_history, |
|
1230 | strict_node_history, | |
1231 | ] |
|
1231 | ] | |
1232 | # developer config: convert.svn.debugsvnlog |
|
1232 | # developer config: convert.svn.debugsvnlog | |
1233 | if not self.ui.configbool(b'convert', b'svn.debugsvnlog'): |
|
1233 | if not self.ui.configbool(b'convert', b'svn.debugsvnlog'): | |
1234 | return directlogstream(*args) |
|
1234 | return directlogstream(*args) | |
1235 | arg = encodeargs(args) |
|
1235 | arg = encodeargs(args) | |
1236 | hgexe = procutil.hgexecutable() |
|
1236 | hgexe = procutil.hgexecutable() | |
1237 | cmd = b'%s debugsvnlog' % procutil.shellquote(hgexe) |
|
1237 | cmd = b'%s debugsvnlog' % procutil.shellquote(hgexe) | |
1238 | stdin, stdout = procutil.popen2(procutil.quotecommand(cmd)) |

1238 | stdin, stdout = procutil.popen2(cmd) | |
1239 | stdin.write(arg) |
|
1239 | stdin.write(arg) | |
1240 | try: |
|
1240 | try: | |
1241 | stdin.close() |
|
1241 | stdin.close() | |
1242 | except IOError: |
|
1242 | except IOError: | |
1243 | raise error.Abort( |
|
1243 | raise error.Abort( | |
1244 | _( |
|
1244 | _( | |
1245 | b'Mercurial failed to run itself, check' |
|
1245 | b'Mercurial failed to run itself, check' | |
1246 | b' hg executable is in PATH' |
|
1246 | b' hg executable is in PATH' | |
1247 | ) |
|
1247 | ) | |
1248 | ) |
|
1248 | ) | |
1249 | return logstream(stdout) |
|
1249 | return logstream(stdout) | |
1250 |
|
1250 | |||
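
Unless the developer option convert.svn.debugsvnlog is disabled, _getlog() above does not stream the log in-process: it serializes its arguments, feeds them to a child `hg debugsvnlog` process on stdin, and reads the parsed entries back through logstream(stdout). Below is a minimal sketch of what such an argument round-trip could look like, assuming a pickle-based encoding; it is not the module's real encodeargs()/decodeargs() helpers.

import pickle

def encodeargs(args):
    # serialize the svn.ra.get_log() style argument list for the child
    return pickle.dumps(args)

def decodeargs(data):
    # reverse step, performed on the `hg debugsvnlog` side
    return pickle.loads(data)

args = [b'file:///repo', [b'trunk'], 10, 1, 0, True, False]
assert decodeargs(encodeargs(args)) == args
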
1251 |
|
1251 | |||
1252 | pre_revprop_change = b'''#!/bin/sh |
|
1252 | pre_revprop_change = b'''#!/bin/sh | |
1253 |
|
1253 | |||
1254 | REPOS="$1" |
|
1254 | REPOS="$1" | |
1255 | REV="$2" |
|
1255 | REV="$2" | |
1256 | USER="$3" |
|
1256 | USER="$3" | |
1257 | PROPNAME="$4" |
|
1257 | PROPNAME="$4" | |
1258 | ACTION="$5" |
|
1258 | ACTION="$5" | |
1259 |
|
1259 | |||
1260 | if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi |
|
1260 | if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi | |
1261 | if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi |
|
1261 | if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi | |
1262 | if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi |
|
1262 | if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi | |
1263 |
|
1263 | |||
1264 | echo "Changing prohibited revision property" >&2 |
|
1264 | echo "Changing prohibited revision property" >&2 | |
1265 | exit 1 |
|
1265 | exit 1 | |
1266 | ''' |
|
1266 | ''' | |
1267 |
|
1267 | |||
1268 |
|
1268 | |||
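
The shell script above becomes the pre-revprop-change hook of any repository that svn_sink creates for itself (see __init__ further down): a stock Subversion repository rejects revision-property changes unless such a hook allows them, and the sink later sets the hg:convert-rev and hg:convert-branch revision properties after each converted commit. A hedged example of reading those properties back with the standard svn client, using a made-up repository path:

import subprocess

# Hypothetical example: query the revision properties set on revision 5
# of a locally created repository (the URL below is an assumption).
repo_url = 'file:///tmp/converted-svn-repo'
for prop in ('hg:convert-rev', 'hg:convert-branch'):
    result = subprocess.run(
        ['svn', 'propget', '--revprop', '-r', '5', prop, repo_url],
        capture_output=True, text=True, check=False,
    )
    print(prop, result.stdout.strip())
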
1269 | class svn_sink(converter_sink, commandline): |
|
1269 | class svn_sink(converter_sink, commandline): | |
1270 | commit_re = re.compile(br'Committed revision (\d+).', re.M) |
|
1270 | commit_re = re.compile(br'Committed revision (\d+).', re.M) | |
1271 | uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M) |
|
1271 | uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M) | |
1272 |
|
1272 | |||
1273 | def prerun(self): |
|
1273 | def prerun(self): | |
1274 | if self.wc: |
|
1274 | if self.wc: | |
1275 | os.chdir(self.wc) |
|
1275 | os.chdir(self.wc) | |
1276 |
|
1276 | |||
1277 | def postrun(self): |
|
1277 | def postrun(self): | |
1278 | if self.wc: |
|
1278 | if self.wc: | |
1279 | os.chdir(self.cwd) |
|
1279 | os.chdir(self.cwd) | |
1280 |
|
1280 | |||
1281 | def join(self, name): |
|
1281 | def join(self, name): | |
1282 | return os.path.join(self.wc, b'.svn', name) |
|
1282 | return os.path.join(self.wc, b'.svn', name) | |
1283 |
|
1283 | |||
1284 | def revmapfile(self): |
|
1284 | def revmapfile(self): | |
1285 | return self.join(b'hg-shamap') |
|
1285 | return self.join(b'hg-shamap') | |
1286 |
|
1286 | |||
1287 | def authorfile(self): |
|
1287 | def authorfile(self): | |
1288 | return self.join(b'hg-authormap') |
|
1288 | return self.join(b'hg-authormap') | |
1289 |
|
1289 | |||
1290 | def __init__(self, ui, repotype, path): |
|
1290 | def __init__(self, ui, repotype, path): | |
1291 |
|
1291 | |||
1292 | converter_sink.__init__(self, ui, repotype, path) |
|
1292 | converter_sink.__init__(self, ui, repotype, path) | |
1293 | commandline.__init__(self, ui, b'svn') |
|
1293 | commandline.__init__(self, ui, b'svn') | |
1294 | self.delete = [] |
|
1294 | self.delete = [] | |
1295 | self.setexec = [] |
|
1295 | self.setexec = [] | |
1296 | self.delexec = [] |
|
1296 | self.delexec = [] | |
1297 | self.copies = [] |
|
1297 | self.copies = [] | |
1298 | self.wc = None |
|
1298 | self.wc = None | |
1299 | self.cwd = encoding.getcwd() |
|
1299 | self.cwd = encoding.getcwd() | |
1300 |
|
1300 | |||
1301 | created = False |
|
1301 | created = False | |
1302 | if os.path.isfile(os.path.join(path, b'.svn', b'entries')): |
|
1302 | if os.path.isfile(os.path.join(path, b'.svn', b'entries')): | |
1303 | self.wc = os.path.realpath(path) |
|
1303 | self.wc = os.path.realpath(path) | |
1304 | self.run0(b'update') |
|
1304 | self.run0(b'update') | |
1305 | else: |
|
1305 | else: | |
1306 | if not re.search(br'^(file|http|https|svn|svn\+ssh)://', path): |
|
1306 | if not re.search(br'^(file|http|https|svn|svn\+ssh)://', path): | |
1307 | path = os.path.realpath(path) |
|
1307 | path = os.path.realpath(path) | |
1308 | if os.path.isdir(os.path.dirname(path)): |
|
1308 | if os.path.isdir(os.path.dirname(path)): | |
1309 | if not os.path.exists( |
|
1309 | if not os.path.exists( | |
1310 | os.path.join(path, b'db', b'fs-type') |
|
1310 | os.path.join(path, b'db', b'fs-type') | |
1311 | ): |
|
1311 | ): | |
1312 | ui.status( |
|
1312 | ui.status( | |
1313 | _(b"initializing svn repository '%s'\n") |
|
1313 | _(b"initializing svn repository '%s'\n") | |
1314 | % os.path.basename(path) |
|
1314 | % os.path.basename(path) | |
1315 | ) |
|
1315 | ) | |
1316 | commandline(ui, b'svnadmin').run0(b'create', path) |
|
1316 | commandline(ui, b'svnadmin').run0(b'create', path) | |
1317 | created = path |
|
1317 | created = path | |
1318 | path = util.normpath(path) |
|
1318 | path = util.normpath(path) | |
1319 | if not path.startswith(b'/'): |
|
1319 | if not path.startswith(b'/'): | |
1320 | path = b'/' + path |
|
1320 | path = b'/' + path | |
1321 | path = b'file://' + path |
|
1321 | path = b'file://' + path | |
1322 |
|
1322 | |||
1323 | wcpath = os.path.join( |
|
1323 | wcpath = os.path.join( | |
1324 | encoding.getcwd(), os.path.basename(path) + b'-wc' |
|
1324 | encoding.getcwd(), os.path.basename(path) + b'-wc' | |
1325 | ) |
|
1325 | ) | |
1326 | ui.status( |
|
1326 | ui.status( | |
1327 | _(b"initializing svn working copy '%s'\n") |
|
1327 | _(b"initializing svn working copy '%s'\n") | |
1328 | % os.path.basename(wcpath) |
|
1328 | % os.path.basename(wcpath) | |
1329 | ) |
|
1329 | ) | |
1330 | self.run0(b'checkout', path, wcpath) |
|
1330 | self.run0(b'checkout', path, wcpath) | |
1331 |
|
1331 | |||
1332 | self.wc = wcpath |
|
1332 | self.wc = wcpath | |
1333 | self.opener = vfsmod.vfs(self.wc) |
|
1333 | self.opener = vfsmod.vfs(self.wc) | |
1334 | self.wopener = vfsmod.vfs(self.wc) |
|
1334 | self.wopener = vfsmod.vfs(self.wc) | |
1335 | self.childmap = mapfile(ui, self.join(b'hg-childmap')) |
|
1335 | self.childmap = mapfile(ui, self.join(b'hg-childmap')) | |
1336 | if util.checkexec(self.wc): |
|
1336 | if util.checkexec(self.wc): | |
1337 | self.is_exec = util.isexec |
|
1337 | self.is_exec = util.isexec | |
1338 | else: |
|
1338 | else: | |
1339 | self.is_exec = None |
|
1339 | self.is_exec = None | |
1340 |
|
1340 | |||
1341 | if created: |
|
1341 | if created: | |
1342 | hook = os.path.join(created, b'hooks', b'pre-revprop-change') |
|
1342 | hook = os.path.join(created, b'hooks', b'pre-revprop-change') | |
1343 | fp = open(hook, b'wb') |
|
1343 | fp = open(hook, b'wb') | |
1344 | fp.write(pre_revprop_change) |
|
1344 | fp.write(pre_revprop_change) | |
1345 | fp.close() |
|
1345 | fp.close() | |
1346 | util.setflags(hook, False, True) |
|
1346 | util.setflags(hook, False, True) | |
1347 |
|
1347 | |||
1348 | output = self.run0(b'info') |
|
1348 | output = self.run0(b'info') | |
1349 | self.uuid = self.uuid_re.search(output).group(1).strip() |
|
1349 | self.uuid = self.uuid_re.search(output).group(1).strip() | |
1350 |
|
1350 | |||
1351 | def wjoin(self, *names): |
|
1351 | def wjoin(self, *names): | |
1352 | return os.path.join(self.wc, *names) |
|
1352 | return os.path.join(self.wc, *names) | |
1353 |
|
1353 | |||
1354 | @propertycache |
|
1354 | @propertycache | |
1355 | def manifest(self): |
|
1355 | def manifest(self): | |
1356 | # As of svn 1.7, the "add" command fails when receiving |
|
1356 | # As of svn 1.7, the "add" command fails when receiving | |
1357 | # already tracked entries, so we have to track and filter them |
|
1357 | # already tracked entries, so we have to track and filter them | |
1358 | # ourselves. |
|
1358 | # ourselves. | |
1359 | m = set() |
|
1359 | m = set() | |
1360 | output = self.run0(b'ls', recursive=True, xml=True) |
|
1360 | output = self.run0(b'ls', recursive=True, xml=True) | |
1361 | doc = xml.dom.minidom.parseString(output) |
|
1361 | doc = xml.dom.minidom.parseString(output) | |
1362 | for e in doc.getElementsByTagName('entry'): |
|
1362 | for e in doc.getElementsByTagName('entry'): | |
1363 | for n in e.childNodes: |
|
1363 | for n in e.childNodes: | |
1364 | if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name': |
|
1364 | if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name': | |
1365 | continue |
|
1365 | continue | |
1366 | name = ''.join( |
|
1366 | name = ''.join( | |
1367 | c.data for c in n.childNodes if c.nodeType == c.TEXT_NODE |
|
1367 | c.data for c in n.childNodes if c.nodeType == c.TEXT_NODE | |
1368 | ) |
|
1368 | ) | |
1369 | # Entries are compared with names coming from |
|
1369 | # Entries are compared with names coming from | |
1370 | # mercurial, so bytes with undefined encoding. Our |
|
1370 | # mercurial, so bytes with undefined encoding. Our | |
1371 | # best bet is to assume they are in local |
|
1371 | # best bet is to assume they are in local | |
1372 | # encoding. They will be passed to command line calls |
|
1372 | # encoding. They will be passed to command line calls | |
1373 | # later anyway, so they better be. |
|
1373 | # later anyway, so they better be. | |
1374 | m.add(encoding.unitolocal(name)) |
|
1374 | m.add(encoding.unitolocal(name)) | |
1375 | break |
|
1375 | break | |
1376 | return m |
|
1376 | return m | |
1377 |
|
1377 | |||
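
The manifest property above shells out to `svn ls` with --recursive and --xml and records every <name> element it finds, so that add_dirs() and add_files() can skip paths Subversion already tracks. Roughly the kind of XML being consumed, with an invented two-entry sample rather than captured output:

import xml.dom.minidom

# Illustrative only: a tiny document shaped like `svn ls -R --xml`
# output; real output carries more attributes and nesting.
sample = b"""<?xml version="1.0"?>
<lists><list path=".">
<entry kind="file"><name>setup.py</name></entry>
<entry kind="dir"><name>hgext</name></entry>
</list></lists>"""

tracked = set()
doc = xml.dom.minidom.parseString(sample)
for entry in doc.getElementsByTagName('entry'):
    for name in entry.getElementsByTagName('name'):
        text = ''.join(c.data for c in name.childNodes
                       if c.nodeType == c.TEXT_NODE)
        tracked.add(text)
assert tracked == {'setup.py', 'hgext'}
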
1378 | def putfile(self, filename, flags, data): |
|
1378 | def putfile(self, filename, flags, data): | |
1379 | if b'l' in flags: |
|
1379 | if b'l' in flags: | |
1380 | self.wopener.symlink(data, filename) |
|
1380 | self.wopener.symlink(data, filename) | |
1381 | else: |
|
1381 | else: | |
1382 | try: |
|
1382 | try: | |
1383 | if os.path.islink(self.wjoin(filename)): |
|
1383 | if os.path.islink(self.wjoin(filename)): | |
1384 | os.unlink(filename) |
|
1384 | os.unlink(filename) | |
1385 | except OSError: |
|
1385 | except OSError: | |
1386 | pass |
|
1386 | pass | |
1387 |
|
1387 | |||
1388 | if self.is_exec: |
|
1388 | if self.is_exec: | |
1389 | # We need to check executability of the file before the change, |
|
1389 | # We need to check executability of the file before the change, | |
1390 | # because `vfs.write` is able to reset exec bit. |
|
1390 | # because `vfs.write` is able to reset exec bit. | |
1391 | wasexec = False |
|
1391 | wasexec = False | |
1392 | if os.path.exists(self.wjoin(filename)): |
|
1392 | if os.path.exists(self.wjoin(filename)): | |
1393 | wasexec = self.is_exec(self.wjoin(filename)) |
|
1393 | wasexec = self.is_exec(self.wjoin(filename)) | |
1394 |
|
1394 | |||
1395 | self.wopener.write(filename, data) |
|
1395 | self.wopener.write(filename, data) | |
1396 |
|
1396 | |||
1397 | if self.is_exec: |
|
1397 | if self.is_exec: | |
1398 | if wasexec: |
|
1398 | if wasexec: | |
1399 | if b'x' not in flags: |
|
1399 | if b'x' not in flags: | |
1400 | self.delexec.append(filename) |
|
1400 | self.delexec.append(filename) | |
1401 | else: |
|
1401 | else: | |
1402 | if b'x' in flags: |
|
1402 | if b'x' in flags: | |
1403 | self.setexec.append(filename) |
|
1403 | self.setexec.append(filename) | |
1404 | util.setflags(self.wjoin(filename), False, b'x' in flags) |
|
1404 | util.setflags(self.wjoin(filename), False, b'x' in flags) | |
1405 |
|
1405 | |||
1406 | def _copyfile(self, source, dest): |
|
1406 | def _copyfile(self, source, dest): | |
1407 | # SVN's copy command pukes if the destination file exists, but |
|
1407 | # SVN's copy command pukes if the destination file exists, but | |
1408 | # our copyfile method expects to record a copy that has |
|
1408 | # our copyfile method expects to record a copy that has | |
1409 | # already occurred. Cross the semantic gap. |
|
1409 | # already occurred. Cross the semantic gap. | |
1410 | wdest = self.wjoin(dest) |
|
1410 | wdest = self.wjoin(dest) | |
1411 | exists = os.path.lexists(wdest) |
|
1411 | exists = os.path.lexists(wdest) | |
1412 | if exists: |
|
1412 | if exists: | |
1413 | fd, tempname = pycompat.mkstemp( |
|
1413 | fd, tempname = pycompat.mkstemp( | |
1414 | prefix=b'hg-copy-', dir=os.path.dirname(wdest) |
|
1414 | prefix=b'hg-copy-', dir=os.path.dirname(wdest) | |
1415 | ) |
|
1415 | ) | |
1416 | os.close(fd) |
|
1416 | os.close(fd) | |
1417 | os.unlink(tempname) |
|
1417 | os.unlink(tempname) | |
1418 | os.rename(wdest, tempname) |
|
1418 | os.rename(wdest, tempname) | |
1419 | try: |
|
1419 | try: | |
1420 | self.run0(b'copy', source, dest) |
|
1420 | self.run0(b'copy', source, dest) | |
1421 | finally: |
|
1421 | finally: | |
1422 | self.manifest.add(dest) |
|
1422 | self.manifest.add(dest) | |
1423 | if exists: |
|
1423 | if exists: | |
1424 | try: |
|
1424 | try: | |
1425 | os.unlink(wdest) |
|
1425 | os.unlink(wdest) | |
1426 | except OSError: |
|
1426 | except OSError: | |
1427 | pass |
|
1427 | pass | |
1428 | os.rename(tempname, wdest) |
|
1428 | os.rename(tempname, wdest) | |
1429 |
|
1429 | |||
1430 | def dirs_of(self, files): |
|
1430 | def dirs_of(self, files): | |
1431 | dirs = set() |
|
1431 | dirs = set() | |
1432 | for f in files: |
|
1432 | for f in files: | |
1433 | if os.path.isdir(self.wjoin(f)): |
|
1433 | if os.path.isdir(self.wjoin(f)): | |
1434 | dirs.add(f) |
|
1434 | dirs.add(f) | |
1435 | i = len(f) |
|
1435 | i = len(f) | |
1436 | for i in iter(lambda: f.rfind(b'/', 0, i), -1): |
|
1436 | for i in iter(lambda: f.rfind(b'/', 0, i), -1): | |
1437 | dirs.add(f[:i]) |
|
1437 | dirs.add(f[:i]) | |
1438 | return dirs |
|
1438 | return dirs | |
1439 |
|
1439 | |||
1440 | def add_dirs(self, files): |
|
1440 | def add_dirs(self, files): | |
1441 | add_dirs = [ |
|
1441 | add_dirs = [ | |
1442 | d for d in sorted(self.dirs_of(files)) if d not in self.manifest |
|
1442 | d for d in sorted(self.dirs_of(files)) if d not in self.manifest | |
1443 | ] |
|
1443 | ] | |
1444 | if add_dirs: |
|
1444 | if add_dirs: | |
1445 | self.manifest.update(add_dirs) |
|
1445 | self.manifest.update(add_dirs) | |
1446 | self.xargs(add_dirs, b'add', non_recursive=True, quiet=True) |
|
1446 | self.xargs(add_dirs, b'add', non_recursive=True, quiet=True) | |
1447 | return add_dirs |
|
1447 | return add_dirs | |
1448 |
|
1448 | |||
1449 | def add_files(self, files): |
|
1449 | def add_files(self, files): | |
1450 | files = [f for f in files if f not in self.manifest] |
|
1450 | files = [f for f in files if f not in self.manifest] | |
1451 | if files: |
|
1451 | if files: | |
1452 | self.manifest.update(files) |
|
1452 | self.manifest.update(files) | |
1453 | self.xargs(files, b'add', quiet=True) |
|
1453 | self.xargs(files, b'add', quiet=True) | |
1454 | return files |
|
1454 | return files | |
1455 |
|
1455 | |||
1456 | def addchild(self, parent, child): |
|
1456 | def addchild(self, parent, child): | |
1457 | self.childmap[parent] = child |
|
1457 | self.childmap[parent] = child | |
1458 |
|
1458 | |||
1459 | def revid(self, rev): |
|
1459 | def revid(self, rev): | |
1460 | return b"svn:%s@%s" % (self.uuid, rev) |
|
1460 | return b"svn:%s@%s" % (self.uuid, rev) | |
1461 |
|
1461 | |||
1462 | def putcommit( |
|
1462 | def putcommit( | |
1463 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
1463 | self, files, copies, parents, commit, source, revmap, full, cleanp2 | |
1464 | ): |
|
1464 | ): | |
1465 | for parent in parents: |
|
1465 | for parent in parents: | |
1466 | try: |
|
1466 | try: | |
1467 | return self.revid(self.childmap[parent]) |
|
1467 | return self.revid(self.childmap[parent]) | |
1468 | except KeyError: |
|
1468 | except KeyError: | |
1469 | pass |
|
1469 | pass | |
1470 |
|
1470 | |||
1471 | # Apply changes to working copy |
|
1471 | # Apply changes to working copy | |
1472 | for f, v in files: |
|
1472 | for f, v in files: | |
1473 | data, mode = source.getfile(f, v) |
|
1473 | data, mode = source.getfile(f, v) | |
1474 | if data is None: |
|
1474 | if data is None: | |
1475 | self.delete.append(f) |
|
1475 | self.delete.append(f) | |
1476 | else: |
|
1476 | else: | |
1477 | self.putfile(f, mode, data) |
|
1477 | self.putfile(f, mode, data) | |
1478 | if f in copies: |
|
1478 | if f in copies: | |
1479 | self.copies.append([copies[f], f]) |
|
1479 | self.copies.append([copies[f], f]) | |
1480 | if full: |
|
1480 | if full: | |
1481 | self.delete.extend(sorted(self.manifest.difference(files))) |
|
1481 | self.delete.extend(sorted(self.manifest.difference(files))) | |
1482 | files = [f[0] for f in files] |
|
1482 | files = [f[0] for f in files] | |
1483 |
|
1483 | |||
1484 | entries = set(self.delete) |
|
1484 | entries = set(self.delete) | |
1485 | files = frozenset(files) |
|
1485 | files = frozenset(files) | |
1486 | entries.update(self.add_dirs(files.difference(entries))) |
|
1486 | entries.update(self.add_dirs(files.difference(entries))) | |
1487 | if self.copies: |
|
1487 | if self.copies: | |
1488 | for s, d in self.copies: |
|
1488 | for s, d in self.copies: | |
1489 | self._copyfile(s, d) |
|
1489 | self._copyfile(s, d) | |
1490 | self.copies = [] |
|
1490 | self.copies = [] | |
1491 | if self.delete: |
|
1491 | if self.delete: | |
1492 | self.xargs(self.delete, b'delete') |
|
1492 | self.xargs(self.delete, b'delete') | |
1493 | for f in self.delete: |
|
1493 | for f in self.delete: | |
1494 | self.manifest.remove(f) |
|
1494 | self.manifest.remove(f) | |
1495 | self.delete = [] |
|
1495 | self.delete = [] | |
1496 | entries.update(self.add_files(files.difference(entries))) |
|
1496 | entries.update(self.add_files(files.difference(entries))) | |
1497 | if self.delexec: |
|
1497 | if self.delexec: | |
1498 | self.xargs(self.delexec, b'propdel', b'svn:executable') |
|
1498 | self.xargs(self.delexec, b'propdel', b'svn:executable') | |
1499 | self.delexec = [] |
|
1499 | self.delexec = [] | |
1500 | if self.setexec: |
|
1500 | if self.setexec: | |
1501 | self.xargs(self.setexec, b'propset', b'svn:executable', b'*') |
|
1501 | self.xargs(self.setexec, b'propset', b'svn:executable', b'*') | |
1502 | self.setexec = [] |
|
1502 | self.setexec = [] | |
1503 |
|
1503 | |||
1504 | fd, messagefile = pycompat.mkstemp(prefix=b'hg-convert-') |
|
1504 | fd, messagefile = pycompat.mkstemp(prefix=b'hg-convert-') | |
1505 | fp = os.fdopen(fd, 'wb') |
|
1505 | fp = os.fdopen(fd, 'wb') | |
1506 | fp.write(util.tonativeeol(commit.desc)) |
|
1506 | fp.write(util.tonativeeol(commit.desc)) | |
1507 | fp.close() |
|
1507 | fp.close() | |
1508 | try: |
|
1508 | try: | |
1509 | output = self.run0( |
|
1509 | output = self.run0( | |
1510 | b'commit', |
|
1510 | b'commit', | |
1511 | username=stringutil.shortuser(commit.author), |
|
1511 | username=stringutil.shortuser(commit.author), | |
1512 | file=messagefile, |
|
1512 | file=messagefile, | |
1513 | encoding=b'utf-8', |
|
1513 | encoding=b'utf-8', | |
1514 | ) |
|
1514 | ) | |
1515 | try: |
|
1515 | try: | |
1516 | rev = self.commit_re.search(output).group(1) |
|
1516 | rev = self.commit_re.search(output).group(1) | |
1517 | except AttributeError: |
|
1517 | except AttributeError: | |
1518 | if not files: |
|
1518 | if not files: | |
1519 | return parents[0] if parents else b'None' |
|
1519 | return parents[0] if parents else b'None' | |
1520 | self.ui.warn(_(b'unexpected svn output:\n')) |
|
1520 | self.ui.warn(_(b'unexpected svn output:\n')) | |
1521 | self.ui.warn(output) |
|
1521 | self.ui.warn(output) | |
1522 | raise error.Abort(_(b'unable to cope with svn output')) |
|
1522 | raise error.Abort(_(b'unable to cope with svn output')) | |
1523 | if commit.rev: |
|
1523 | if commit.rev: | |
1524 | self.run( |
|
1524 | self.run( | |
1525 | b'propset', |
|
1525 | b'propset', | |
1526 | b'hg:convert-rev', |
|
1526 | b'hg:convert-rev', | |
1527 | commit.rev, |
|
1527 | commit.rev, | |
1528 | revprop=True, |
|
1528 | revprop=True, | |
1529 | revision=rev, |
|
1529 | revision=rev, | |
1530 | ) |
|
1530 | ) | |
1531 | if commit.branch and commit.branch != b'default': |
|
1531 | if commit.branch and commit.branch != b'default': | |
1532 | self.run( |
|
1532 | self.run( | |
1533 | b'propset', |
|
1533 | b'propset', | |
1534 | b'hg:convert-branch', |
|
1534 | b'hg:convert-branch', | |
1535 | commit.branch, |
|
1535 | commit.branch, | |
1536 | revprop=True, |
|
1536 | revprop=True, | |
1537 | revision=rev, |
|
1537 | revision=rev, | |
1538 | ) |
|
1538 | ) | |
1539 | for parent in parents: |
|
1539 | for parent in parents: | |
1540 | self.addchild(parent, rev) |
|
1540 | self.addchild(parent, rev) | |
1541 | return self.revid(rev) |
|
1541 | return self.revid(rev) | |
1542 | finally: |
|
1542 | finally: | |
1543 | os.unlink(messagefile) |
|
1543 | os.unlink(messagefile) | |
1544 |
|
1544 | |||
1545 | def puttags(self, tags): |
|
1545 | def puttags(self, tags): | |
1546 | self.ui.warn(_(b'writing Subversion tags is not yet implemented\n')) |
|
1546 | self.ui.warn(_(b'writing Subversion tags is not yet implemented\n')) | |
1547 | return None, None |
|
1547 | return None, None | |
1548 |
|
1548 | |||
1549 | def hascommitfrommap(self, rev): |
|
1549 | def hascommitfrommap(self, rev): | |
1550 | # We trust that revisions referenced in a map are still present |

1550 | # We trust that revisions referenced in a map are still present | |
1551 | # TODO: implement something better if necessary and feasible |
|
1551 | # TODO: implement something better if necessary and feasible | |
1552 | return True |
|
1552 | return True | |
1553 |
|
1553 | |||
1554 | def hascommitforsplicemap(self, rev): |
|
1554 | def hascommitforsplicemap(self, rev): | |
1555 | # This is not correct as one can convert to an existing subversion |
|
1555 | # This is not correct as one can convert to an existing subversion | |
1556 | # repository and childmap would not list all revisions. Too bad. |
|
1556 | # repository and childmap would not list all revisions. Too bad. | |
1557 | if rev in self.childmap: |
|
1557 | if rev in self.childmap: | |
1558 | return True |
|
1558 | return True | |
1559 | raise error.Abort( |
|
1559 | raise error.Abort( | |
1560 | _( |
|
1560 | _( | |
1561 | b'splice map revision %s not found in subversion ' |
|
1561 | b'splice map revision %s not found in subversion ' | |
1562 | b'child map (revision lookups are not implemented)' |
|
1562 | b'child map (revision lookups are not implemented)' | |
1563 | ) |
|
1563 | ) | |
1564 | % rev |
|
1564 | % rev | |
1565 | ) |
|
1565 | ) |
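
To recap the sink's commit path: putcommit() writes the incoming files into the working copy, schedules adds and deletes, runs `svn commit`, and then recovers the new revision number by matching commit_re against the command output before recording it in the childmap and in the hg:convert-rev revision property. A small self-contained check of that regular expression, with a made-up output string:

import re

# Same pattern as svn_sink.commit_re; the sample output below is invented.
commit_re = re.compile(br'Committed revision (\d+).', re.M)

output = b"Sending        foo.c\nTransmitting file data .\nCommitted revision 42.\n"
match = commit_re.search(output)
assert match is not None and match.group(1) == b'42'
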
@@ -1,720 +1,719 b'' | |||||
1 | # extdiff.py - external diff program support for mercurial |
|
1 | # extdiff.py - external diff program support for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | '''command to allow external programs to compare revisions |
|
8 | '''command to allow external programs to compare revisions | |
9 |
|
9 | |||
10 | The extdiff Mercurial extension allows you to use external programs |
|
10 | The extdiff Mercurial extension allows you to use external programs | |
11 | to compare revisions, or revision with working directory. The external |
|
11 | to compare revisions, or revision with working directory. The external | |
12 | diff programs are called with a configurable set of options and two |
|
12 | diff programs are called with a configurable set of options and two | |
13 | non-option arguments: paths to directories containing snapshots of |
|
13 | non-option arguments: paths to directories containing snapshots of | |
14 | files to compare. |
|
14 | files to compare. | |
15 |
|
15 | |||
16 | If there is more than one file being compared and the "child" revision |
|
16 | If there is more than one file being compared and the "child" revision | |
17 | is the working directory, any modifications made in the external diff |
|
17 | is the working directory, any modifications made in the external diff | |
18 | program will be copied back to the working directory from the temporary |
|
18 | program will be copied back to the working directory from the temporary | |
19 | directory. |
|
19 | directory. | |
20 |
|
20 | |||
21 | The extdiff extension also allows you to configure new diff commands, so |
|
21 | The extdiff extension also allows you to configure new diff commands, so | |
22 | you do not need to type :hg:`extdiff -p kdiff3` always. :: |
|
22 | you do not need to type :hg:`extdiff -p kdiff3` always. :: | |
23 |
|
23 | |||
24 | [extdiff] |
|
24 | [extdiff] | |
25 | # add new command that runs GNU diff(1) in 'context diff' mode |
|
25 | # add new command that runs GNU diff(1) in 'context diff' mode | |
26 | cdiff = gdiff -Nprc5 |
|
26 | cdiff = gdiff -Nprc5 | |
27 | ## or the old way: |
|
27 | ## or the old way: | |
28 | #cmd.cdiff = gdiff |
|
28 | #cmd.cdiff = gdiff | |
29 | #opts.cdiff = -Nprc5 |
|
29 | #opts.cdiff = -Nprc5 | |
30 |
|
30 | |||
31 | # add new command called meld, runs meld (no need to name twice). If |
|
31 | # add new command called meld, runs meld (no need to name twice). If | |
32 | # the meld executable is not available, the meld tool in [merge-tools] |
|
32 | # the meld executable is not available, the meld tool in [merge-tools] | |
33 | # will be used, if available |
|
33 | # will be used, if available | |
34 | meld = |
|
34 | meld = | |
35 |
|
35 | |||
36 | # add new command called vimdiff, runs gvimdiff with DirDiff plugin |
|
36 | # add new command called vimdiff, runs gvimdiff with DirDiff plugin | |
37 | # (see http://www.vim.org/scripts/script.php?script_id=102) Non |
|
37 | # (see http://www.vim.org/scripts/script.php?script_id=102) Non | |
38 | # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in |
|
38 | # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in | |
39 | # your .vimrc |
|
39 | # your .vimrc | |
40 | vimdiff = gvim -f "+next" \\ |
|
40 | vimdiff = gvim -f "+next" \\ | |
41 | "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))" |
|
41 | "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))" | |
42 |
|
42 | |||
43 | Tool arguments can include variables that are expanded at runtime:: |
|
43 | Tool arguments can include variables that are expanded at runtime:: | |
44 |
|
44 | |||
45 | $parent1, $plabel1 - filename, descriptive label of first parent |
|
45 | $parent1, $plabel1 - filename, descriptive label of first parent | |
46 | $child, $clabel - filename, descriptive label of child revision |
|
46 | $child, $clabel - filename, descriptive label of child revision | |
47 | $parent2, $plabel2 - filename, descriptive label of second parent |
|
47 | $parent2, $plabel2 - filename, descriptive label of second parent | |
48 | $root - repository root |
|
48 | $root - repository root | |
49 | $parent is an alias for $parent1. |
|
49 | $parent is an alias for $parent1. | |
50 |
|
50 | |||
51 | The extdiff extension will look in your [diff-tools] and [merge-tools] |
|
51 | The extdiff extension will look in your [diff-tools] and [merge-tools] | |
52 | sections for diff tool arguments, when none are specified in [extdiff]. |
|
52 | sections for diff tool arguments, when none are specified in [extdiff]. | |
53 |
|
53 | |||
54 | :: |
|
54 | :: | |
55 |
|
55 | |||
56 | [extdiff] |
|
56 | [extdiff] | |
57 | kdiff3 = |
|
57 | kdiff3 = | |
58 |
|
58 | |||
59 | [diff-tools] |
|
59 | [diff-tools] | |
60 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child |
|
60 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child | |
61 |
|
61 | |||
62 | If a program has a graphical interface, it might be interesting to tell |
|
62 | If a program has a graphical interface, it might be interesting to tell | |
63 | Mercurial about it. It will prevent the program from being mistakenly |
|
63 | Mercurial about it. It will prevent the program from being mistakenly | |
64 | used in a terminal-only environment (such as an SSH terminal session), |
|
64 | used in a terminal-only environment (such as an SSH terminal session), | |
65 | and will make :hg:`extdiff --per-file` open multiple file diffs at once |
|
65 | and will make :hg:`extdiff --per-file` open multiple file diffs at once | |
66 | instead of one by one (if you still want to open file diffs one by one, |
|
66 | instead of one by one (if you still want to open file diffs one by one, | |
67 | you can use the --confirm option). |
|
67 | you can use the --confirm option). | |
68 |
|
68 | |||
69 | Declaring that a tool has a graphical interface can be done with the |
|
69 | Declaring that a tool has a graphical interface can be done with the | |
70 | ``gui`` flag next to where ``diffargs`` are specified: |
|
70 | ``gui`` flag next to where ``diffargs`` are specified: | |
71 |
|
71 | |||
72 | :: |
|
72 | :: | |
73 |
|
73 | |||
74 | [diff-tools] |
|
74 | [diff-tools] | |
75 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child |
|
75 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child | |
76 | kdiff3.gui = true |
|
76 | kdiff3.gui = true | |
77 |
|
77 | |||
78 | You can use -I/-X and list of file or directory names like normal |
|
78 | You can use -I/-X and list of file or directory names like normal | |
79 | :hg:`diff` command. The extdiff extension makes snapshots of only |
|
79 | :hg:`diff` command. The extdiff extension makes snapshots of only | |
80 | needed files, so running the external diff program will actually be |
|
80 | needed files, so running the external diff program will actually be | |
81 | pretty fast (at least faster than having to compare the entire tree). |
|
81 | pretty fast (at least faster than having to compare the entire tree). | |
82 | ''' |
|
82 | ''' | |
83 |
|
83 | |||
84 | from __future__ import absolute_import |
|
84 | from __future__ import absolute_import | |
85 |
|
85 | |||
86 | import os |
|
86 | import os | |
87 | import re |
|
87 | import re | |
88 | import shutil |
|
88 | import shutil | |
89 | import stat |
|
89 | import stat | |
90 | import subprocess |
|
90 | import subprocess | |
91 |
|
91 | |||
92 | from mercurial.i18n import _ |
|
92 | from mercurial.i18n import _ | |
93 | from mercurial.node import ( |
|
93 | from mercurial.node import ( | |
94 | nullid, |
|
94 | nullid, | |
95 | short, |
|
95 | short, | |
96 | ) |
|
96 | ) | |
97 | from mercurial import ( |
|
97 | from mercurial import ( | |
98 | archival, |
|
98 | archival, | |
99 | cmdutil, |
|
99 | cmdutil, | |
100 | encoding, |
|
100 | encoding, | |
101 | error, |
|
101 | error, | |
102 | filemerge, |
|
102 | filemerge, | |
103 | formatter, |
|
103 | formatter, | |
104 | pycompat, |
|
104 | pycompat, | |
105 | registrar, |
|
105 | registrar, | |
106 | scmutil, |
|
106 | scmutil, | |
107 | util, |
|
107 | util, | |
108 | ) |
|
108 | ) | |
109 | from mercurial.utils import ( |
|
109 | from mercurial.utils import ( | |
110 | procutil, |
|
110 | procutil, | |
111 | stringutil, |
|
111 | stringutil, | |
112 | ) |
|
112 | ) | |
113 |
|
113 | |||
114 | cmdtable = {} |
|
114 | cmdtable = {} | |
115 | command = registrar.command(cmdtable) |
|
115 | command = registrar.command(cmdtable) | |
116 |
|
116 | |||
117 | configtable = {} |
|
117 | configtable = {} | |
118 | configitem = registrar.configitem(configtable) |
|
118 | configitem = registrar.configitem(configtable) | |
119 |
|
119 | |||
120 | configitem( |
|
120 | configitem( | |
121 | b'extdiff', br'opts\..*', default=b'', generic=True, |
|
121 | b'extdiff', br'opts\..*', default=b'', generic=True, | |
122 | ) |
|
122 | ) | |
123 |
|
123 | |||
124 | configitem( |
|
124 | configitem( | |
125 | b'extdiff', br'gui\..*', generic=True, |
|
125 | b'extdiff', br'gui\..*', generic=True, | |
126 | ) |
|
126 | ) | |
127 |
|
127 | |||
128 | configitem( |
|
128 | configitem( | |
129 | b'diff-tools', br'.*\.diffargs$', default=None, generic=True, |
|
129 | b'diff-tools', br'.*\.diffargs$', default=None, generic=True, | |
130 | ) |
|
130 | ) | |
131 |
|
131 | |||
132 | configitem( |
|
132 | configitem( | |
133 | b'diff-tools', br'.*\.gui$', generic=True, |
|
133 | b'diff-tools', br'.*\.gui$', generic=True, | |
134 | ) |
|
134 | ) | |
135 |
|
135 | |||
136 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
136 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
137 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
137 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
138 | # be specifying the version(s) of Mercurial they are tested with, or |
|
138 | # be specifying the version(s) of Mercurial they are tested with, or | |
139 | # leave the attribute unspecified. |
|
139 | # leave the attribute unspecified. | |
140 | testedwith = b'ships-with-hg-core' |
|
140 | testedwith = b'ships-with-hg-core' | |
141 |
|
141 | |||
142 |
|
142 | |||
143 | def snapshot(ui, repo, files, node, tmproot, listsubrepos): |
|
143 | def snapshot(ui, repo, files, node, tmproot, listsubrepos): | |
144 | '''snapshot files as of some revision |
|
144 | '''snapshot files as of some revision | |
145 | if not using snapshot, -I/-X does not work and recursive diff |
|
145 | if not using snapshot, -I/-X does not work and recursive diff | |
146 | in tools like kdiff3 and meld displays too many files.''' |
|
146 | in tools like kdiff3 and meld displays too many files.''' | |
147 | dirname = os.path.basename(repo.root) |
|
147 | dirname = os.path.basename(repo.root) | |
148 | if dirname == b"": |
|
148 | if dirname == b"": | |
149 | dirname = b"root" |
|
149 | dirname = b"root" | |
150 | if node is not None: |
|
150 | if node is not None: | |
151 | dirname = b'%s.%s' % (dirname, short(node)) |
|
151 | dirname = b'%s.%s' % (dirname, short(node)) | |
152 | base = os.path.join(tmproot, dirname) |
|
152 | base = os.path.join(tmproot, dirname) | |
153 | os.mkdir(base) |
|
153 | os.mkdir(base) | |
154 | fnsandstat = [] |
|
154 | fnsandstat = [] | |
155 |
|
155 | |||
156 | if node is not None: |
|
156 | if node is not None: | |
157 | ui.note( |
|
157 | ui.note( | |
158 | _(b'making snapshot of %d files from rev %s\n') |
|
158 | _(b'making snapshot of %d files from rev %s\n') | |
159 | % (len(files), short(node)) |
|
159 | % (len(files), short(node)) | |
160 | ) |
|
160 | ) | |
161 | else: |
|
161 | else: | |
162 | ui.note( |
|
162 | ui.note( | |
163 | _(b'making snapshot of %d files from working directory\n') |
|
163 | _(b'making snapshot of %d files from working directory\n') | |
164 | % (len(files)) |
|
164 | % (len(files)) | |
165 | ) |
|
165 | ) | |
166 |
|
166 | |||
167 | if files: |
|
167 | if files: | |
168 | repo.ui.setconfig(b"ui", b"archivemeta", False) |
|
168 | repo.ui.setconfig(b"ui", b"archivemeta", False) | |
169 |
|
169 | |||
170 | archival.archive( |
|
170 | archival.archive( | |
171 | repo, |
|
171 | repo, | |
172 | base, |
|
172 | base, | |
173 | node, |
|
173 | node, | |
174 | b'files', |
|
174 | b'files', | |
175 | match=scmutil.matchfiles(repo, files), |
|
175 | match=scmutil.matchfiles(repo, files), | |
176 | subrepos=listsubrepos, |
|
176 | subrepos=listsubrepos, | |
177 | ) |
|
177 | ) | |
178 |
|
178 | |||
179 | for fn in sorted(files): |
|
179 | for fn in sorted(files): | |
180 | wfn = util.pconvert(fn) |
|
180 | wfn = util.pconvert(fn) | |
181 | ui.note(b' %s\n' % wfn) |
|
181 | ui.note(b' %s\n' % wfn) | |
182 |
|
182 | |||
183 | if node is None: |
|
183 | if node is None: | |
184 | dest = os.path.join(base, wfn) |
|
184 | dest = os.path.join(base, wfn) | |
185 |
|
185 | |||
186 | fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest))) |
|
186 | fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest))) | |
187 | return dirname, fnsandstat |
|
187 | return dirname, fnsandstat | |
188 |
|
188 | |||
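# Illustration (paths invented): snapshotting [b'src/a.c'] at a revision whose
# short hash is 1234567890ab, in a repository rooted at /work/myrepo, archives
# the file to <tmproot>/myrepo.1234567890ab/src/a.c and returns that directory
# name. For a working-directory snapshot (node is None) the lstat of every
# copied file is recorded as well, so dodiff() can later detect edits made by
# the diff tool and copy them back into the working directory.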
189 |
|
189 | |||
190 | def formatcmdline( |
|
190 | def formatcmdline( | |
191 | cmdline, |
|
191 | cmdline, | |
192 | repo_root, |
|
192 | repo_root, | |
193 | do3way, |
|
193 | do3way, | |
194 | parent1, |
|
194 | parent1, | |
195 | plabel1, |
|
195 | plabel1, | |
196 | parent2, |
|
196 | parent2, | |
197 | plabel2, |
|
197 | plabel2, | |
198 | child, |
|
198 | child, | |
199 | clabel, |
|
199 | clabel, | |
200 | ): |
|
200 | ): | |
201 | # Function to quote file/dir names in the argument string. |
|
201 | # Function to quote file/dir names in the argument string. | |
202 | # When not operating in 3-way mode, an empty string is |
|
202 | # When not operating in 3-way mode, an empty string is | |
203 | # returned for parent2 |
|
203 | # returned for parent2 | |
204 | replace = { |
|
204 | replace = { | |
205 | b'parent': parent1, |
|
205 | b'parent': parent1, | |
206 | b'parent1': parent1, |
|
206 | b'parent1': parent1, | |
207 | b'parent2': parent2, |
|
207 | b'parent2': parent2, | |
208 | b'plabel1': plabel1, |
|
208 | b'plabel1': plabel1, | |
209 | b'plabel2': plabel2, |
|
209 | b'plabel2': plabel2, | |
210 | b'child': child, |
|
210 | b'child': child, | |
211 | b'clabel': clabel, |
|
211 | b'clabel': clabel, | |
212 | b'root': repo_root, |
|
212 | b'root': repo_root, | |
213 | } |
|
213 | } | |
214 |
|
214 | |||
215 | def quote(match): |
|
215 | def quote(match): | |
216 | pre = match.group(2) |
|
216 | pre = match.group(2) | |
217 | key = match.group(3) |
|
217 | key = match.group(3) | |
218 | if not do3way and key == b'parent2': |
|
218 | if not do3way and key == b'parent2': | |
219 | return pre |
|
219 | return pre | |
220 | return pre + procutil.shellquote(replace[key]) |
|
220 | return pre + procutil.shellquote(replace[key]) | |
221 |
|
221 | |||
222 | # Match parent2 first, so 'parent1?' will match both parent1 and parent |
|
222 | # Match parent2 first, so 'parent1?' will match both parent1 and parent | |
223 | regex = ( |
|
223 | regex = ( | |
224 | br'''(['"]?)([^\s'"$]*)''' |
|
224 | br'''(['"]?)([^\s'"$]*)''' | |
225 | br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1' |
|
225 | br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1' | |
226 | ) |
|
226 | ) | |
227 | if not do3way and not re.search(regex, cmdline): |
|
227 | if not do3way and not re.search(regex, cmdline): | |
228 | cmdline += b' $parent1 $child' |
|
228 | cmdline += b' $parent1 $child' | |
229 | return re.sub(regex, quote, cmdline) |
|
229 | return re.sub(regex, quote, cmdline) | |
230 |
|
230 | |||
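# A minimal sketch of the substitution performed here (all values invented):
#
#   formatcmdline(b'kdiff3 $parent $child', b'/work/myrepo', do3way=False,
#                 parent1=b'/tmp/extdiff.x/snap1', plabel1=b'a.c@10',
#                 parent2=b'', plabel2=b'',
#                 child=b'/tmp/extdiff.x/snap2', clabel=b'a.c@11')
#
# expands on a POSIX system to b'kdiff3 /tmp/extdiff.x/snap1 /tmp/extdiff.x/snap2'
# (operands are shell-quoted only when they need to be). If the template names
# none of the placeholders and do3way is false, b' $parent1 $child' is appended
# before substitution.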
231 |
|
231 | |||
232 | def _systembackground(cmd, environ=None, cwd=None): |
|
232 | def _systembackground(cmd, environ=None, cwd=None): | |
233 | ''' like 'procutil.system', but returns the Popen object directly |
|
233 | ''' like 'procutil.system', but returns the Popen object directly | |
234 | so we don't have to wait on it. |
|
234 | so we don't have to wait on it. | |
235 | ''' |
|
235 | ''' | |
236 | cmd = procutil.quotecommand(cmd) |
|
|||
237 | env = procutil.shellenviron(environ) |
|
236 | env = procutil.shellenviron(environ) | |
238 | proc = subprocess.Popen( |
|
237 | proc = subprocess.Popen( | |
239 | procutil.tonativestr(cmd), |
|
238 | procutil.tonativestr(cmd), | |
240 | shell=True, |
|
239 | shell=True, | |
241 | close_fds=procutil.closefds, |
|
240 | close_fds=procutil.closefds, | |
242 | env=procutil.tonativeenv(env), |
|
241 | env=procutil.tonativeenv(env), | |
243 | cwd=pycompat.rapply(procutil.tonativestr, cwd), |
|
242 | cwd=pycompat.rapply(procutil.tonativestr, cwd), | |
244 | ) |
|
243 | ) | |
245 | return proc |
|
244 | return proc | |
246 |
|
245 | |||
247 |
|
246 | |||
248 | def _runperfilediff( |
|
247 | def _runperfilediff( | |
249 | cmdline, |
|
248 | cmdline, | |
250 | repo_root, |
|
249 | repo_root, | |
251 | ui, |
|
250 | ui, | |
252 | guitool, |
|
251 | guitool, | |
253 | do3way, |
|
252 | do3way, | |
254 | confirm, |
|
253 | confirm, | |
255 | commonfiles, |
|
254 | commonfiles, | |
256 | tmproot, |
|
255 | tmproot, | |
257 | dir1a, |
|
256 | dir1a, | |
258 | dir1b, |
|
257 | dir1b, | |
259 | dir2root, |
|
258 | dir2root, | |
260 | dir2, |
|
259 | dir2, | |
261 | rev1a, |
|
260 | rev1a, | |
262 | rev1b, |
|
261 | rev1b, | |
263 | rev2, |
|
262 | rev2, | |
264 | ): |
|
263 | ): | |
265 | # Note that we need to sort the list of files because it was |
|
264 | # Note that we need to sort the list of files because it was | |
266 | # built in an "unstable" way and it's annoying to get files in a |
|
265 | # built in an "unstable" way and it's annoying to get files in a | |
267 | # random order, especially when "confirm" mode is enabled. |
|
266 | # random order, especially when "confirm" mode is enabled. | |
268 | waitprocs = [] |
|
267 | waitprocs = [] | |
269 | totalfiles = len(commonfiles) |
|
268 | totalfiles = len(commonfiles) | |
270 | for idx, commonfile in enumerate(sorted(commonfiles)): |
|
269 | for idx, commonfile in enumerate(sorted(commonfiles)): | |
271 | path1a = os.path.join(tmproot, dir1a, commonfile) |
|
270 | path1a = os.path.join(tmproot, dir1a, commonfile) | |
272 | label1a = commonfile + rev1a |
|
271 | label1a = commonfile + rev1a | |
273 | if not os.path.isfile(path1a): |
|
272 | if not os.path.isfile(path1a): | |
274 | path1a = pycompat.osdevnull |
|
273 | path1a = pycompat.osdevnull | |
275 |
|
274 | |||
276 | path1b = b'' |
|
275 | path1b = b'' | |
277 | label1b = b'' |
|
276 | label1b = b'' | |
278 | if do3way: |
|
277 | if do3way: | |
279 | path1b = os.path.join(tmproot, dir1b, commonfile) |
|
278 | path1b = os.path.join(tmproot, dir1b, commonfile) | |
280 | label1b = commonfile + rev1b |
|
279 | label1b = commonfile + rev1b | |
281 | if not os.path.isfile(path1b): |
|
280 | if not os.path.isfile(path1b): | |
282 | path1b = pycompat.osdevnull |
|
281 | path1b = pycompat.osdevnull | |
283 |
|
282 | |||
284 | path2 = os.path.join(dir2root, dir2, commonfile) |
|
283 | path2 = os.path.join(dir2root, dir2, commonfile) | |
285 | label2 = commonfile + rev2 |
|
284 | label2 = commonfile + rev2 | |
286 |
|
285 | |||
287 | if confirm: |
|
286 | if confirm: | |
288 | # Prompt before showing this diff |
|
287 | # Prompt before showing this diff | |
289 | difffiles = _(b'diff %s (%d of %d)') % ( |
|
288 | difffiles = _(b'diff %s (%d of %d)') % ( | |
290 | commonfile, |
|
289 | commonfile, | |
291 | idx + 1, |
|
290 | idx + 1, | |
292 | totalfiles, |
|
291 | totalfiles, | |
293 | ) |
|
292 | ) | |
294 | responses = _( |
|
293 | responses = _( | |
295 | b'[Yns?]' |
|
294 | b'[Yns?]' | |
296 | b'$$ &Yes, show diff' |
|
295 | b'$$ &Yes, show diff' | |
297 | b'$$ &No, skip this diff' |
|
296 | b'$$ &No, skip this diff' | |
298 | b'$$ &Skip remaining diffs' |
|
297 | b'$$ &Skip remaining diffs' | |
299 | b'$$ &? (display help)' |
|
298 | b'$$ &? (display help)' | |
300 | ) |
|
299 | ) | |
301 | r = ui.promptchoice(b'%s %s' % (difffiles, responses)) |
|
300 | r = ui.promptchoice(b'%s %s' % (difffiles, responses)) | |
302 | if r == 3: # ? |
|
301 | if r == 3: # ? | |
303 | while r == 3: |
|
302 | while r == 3: | |
304 | for c, t in ui.extractchoices(responses)[1]: |
|
303 | for c, t in ui.extractchoices(responses)[1]: | |
305 | ui.write(b'%s - %s\n' % (c, encoding.lower(t))) |
|
304 | ui.write(b'%s - %s\n' % (c, encoding.lower(t))) | |
306 | r = ui.promptchoice(b'%s %s' % (difffiles, responses)) |
|
305 | r = ui.promptchoice(b'%s %s' % (difffiles, responses)) | |
307 | if r == 0: # yes |
|
306 | if r == 0: # yes | |
308 | pass |
|
307 | pass | |
309 | elif r == 1: # no |
|
308 | elif r == 1: # no | |
310 | continue |
|
309 | continue | |
311 | elif r == 2: # skip |
|
310 | elif r == 2: # skip | |
312 | break |
|
311 | break | |
313 |
|
312 | |||
314 | curcmdline = formatcmdline( |
|
313 | curcmdline = formatcmdline( | |
315 | cmdline, |
|
314 | cmdline, | |
316 | repo_root, |
|
315 | repo_root, | |
317 | do3way=do3way, |
|
316 | do3way=do3way, | |
318 | parent1=path1a, |
|
317 | parent1=path1a, | |
319 | plabel1=label1a, |
|
318 | plabel1=label1a, | |
320 | parent2=path1b, |
|
319 | parent2=path1b, | |
321 | plabel2=label1b, |
|
320 | plabel2=label1b, | |
322 | child=path2, |
|
321 | child=path2, | |
323 | clabel=label2, |
|
322 | clabel=label2, | |
324 | ) |
|
323 | ) | |
325 |
|
324 | |||
326 | if confirm or not guitool: |
|
325 | if confirm or not guitool: | |
327 | # Run the comparison program and wait for it to exit |
|
326 | # Run the comparison program and wait for it to exit | |
328 | # before we show the next file. |
|
327 | # before we show the next file. | |
329 | # This is because either we need to wait for confirmation |
|
328 | # This is because either we need to wait for confirmation | |
330 | # from the user between each invocation, or because, as far |
|
329 | # from the user between each invocation, or because, as far | |
331 | # as we know, the tool doesn't have a GUI, in which case |
|
330 | # as we know, the tool doesn't have a GUI, in which case | |
332 | # we can't run multiple CLI programs at the same time. |
|
331 | # we can't run multiple CLI programs at the same time. | |
333 | ui.debug( |
|
332 | ui.debug( | |
334 | b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot) |
|
333 | b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot) | |
335 | ) |
|
334 | ) | |
336 | ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff') |
|
335 | ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff') | |
337 | else: |
|
336 | else: | |
338 | # Run the comparison program but don't wait, as we're |
|
337 | # Run the comparison program but don't wait, as we're | |
339 | # going to rapid-fire each file diff and then wait on |
|
338 | # going to rapid-fire each file diff and then wait on | |
340 | # the whole group. |
|
339 | # the whole group. | |
341 | ui.debug( |
|
340 | ui.debug( | |
342 | b'running %r in %s (backgrounded)\n' |
|
341 | b'running %r in %s (backgrounded)\n' | |
343 | % (pycompat.bytestr(curcmdline), tmproot) |
|
342 | % (pycompat.bytestr(curcmdline), tmproot) | |
344 | ) |
|
343 | ) | |
345 | proc = _systembackground(curcmdline, cwd=tmproot) |
|
344 | proc = _systembackground(curcmdline, cwd=tmproot) | |
346 | waitprocs.append(proc) |
|
345 | waitprocs.append(proc) | |
347 |
|
346 | |||
348 | if waitprocs: |
|
347 | if waitprocs: | |
349 | with ui.timeblockedsection(b'extdiff'): |
|
348 | with ui.timeblockedsection(b'extdiff'): | |
350 | for proc in waitprocs: |
|
349 | for proc in waitprocs: | |
351 | proc.wait() |
|
350 | proc.wait() | |
352 |
|
351 | |||
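# With --confirm, each iteration above first shows a prompt along these lines
# (file name and counts invented):
#
#   diff src/a.c (3 of 17) [Yns?]
#
# where 'y' runs the tool on that file, 'n' skips it, 's' skips all remaining
# files and '?' prints this list of choices.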
353 |
|
352 | |||
354 | def dodiff(ui, repo, cmdline, pats, opts, guitool=False): |
|
353 | def dodiff(ui, repo, cmdline, pats, opts, guitool=False): | |
355 | '''Do the actual diff: |
|
354 | '''Do the actual diff: | |
356 |
|
355 | |||
357 | - copy to a temp structure if diffing 2 internal revisions |
|
356 | - copy to a temp structure if diffing 2 internal revisions | |
358 | - copy to a temp structure if diffing working revision with |
|
357 | - copy to a temp structure if diffing working revision with | |
359 | another one and more than 1 file is changed |
|
358 | another one and more than 1 file is changed | |
360 | - just invoke the diff for a single file in the working dir |
|
359 | - just invoke the diff for a single file in the working dir | |
361 | ''' |
|
360 | ''' | |
362 |
|
361 | |||
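# How the revision options map to the contexts below, roughly (revision
# numbers are examples only):
#   no -r / -c     -> ctx1a = parent of wdir, ctx2 = wdir
#   -r 10          -> ctx1a = rev 10,         ctx2 = wdir
#   -r 10 -r 12    -> ctx1a = rev 10,         ctx2 = rev 12
#   --change 12    -> ctx1a = p1 of rev 12,   ctx2 = rev 12
# ctx1b is only meaningful for merges, when the tool supports 3-way diffs
# via $parent2.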
363 | cmdutil.check_at_most_one_arg(opts, b'rev', b'change') |
|
362 | cmdutil.check_at_most_one_arg(opts, b'rev', b'change') | |
364 | revs = opts.get(b'rev') |
|
363 | revs = opts.get(b'rev') | |
365 | change = opts.get(b'change') |
|
364 | change = opts.get(b'change') | |
366 | do3way = b'$parent2' in cmdline |
|
365 | do3way = b'$parent2' in cmdline | |
367 |
|
366 | |||
368 | if change: |
|
367 | if change: | |
369 | ctx2 = scmutil.revsingle(repo, change, None) |
|
368 | ctx2 = scmutil.revsingle(repo, change, None) | |
370 | ctx1a, ctx1b = ctx2.p1(), ctx2.p2() |
|
369 | ctx1a, ctx1b = ctx2.p1(), ctx2.p2() | |
371 | else: |
|
370 | else: | |
372 | ctx1a, ctx2 = scmutil.revpair(repo, revs) |
|
371 | ctx1a, ctx2 = scmutil.revpair(repo, revs) | |
373 | if not revs: |
|
372 | if not revs: | |
374 | ctx1b = repo[None].p2() |
|
373 | ctx1b = repo[None].p2() | |
375 | else: |
|
374 | else: | |
376 | ctx1b = repo[nullid] |
|
375 | ctx1b = repo[nullid] | |
377 |
|
376 | |||
378 | perfile = opts.get(b'per_file') |
|
377 | perfile = opts.get(b'per_file') | |
379 | confirm = opts.get(b'confirm') |
|
378 | confirm = opts.get(b'confirm') | |
380 |
|
379 | |||
381 | node1a = ctx1a.node() |
|
380 | node1a = ctx1a.node() | |
382 | node1b = ctx1b.node() |
|
381 | node1b = ctx1b.node() | |
383 | node2 = ctx2.node() |
|
382 | node2 = ctx2.node() | |
384 |
|
383 | |||
385 | # Disable 3-way merge if there is only one parent |
|
384 | # Disable 3-way merge if there is only one parent | |
386 | if do3way: |
|
385 | if do3way: | |
387 | if node1b == nullid: |
|
386 | if node1b == nullid: | |
388 | do3way = False |
|
387 | do3way = False | |
389 |
|
388 | |||
390 | subrepos = opts.get(b'subrepos') |
|
389 | subrepos = opts.get(b'subrepos') | |
391 |
|
390 | |||
392 | matcher = scmutil.match(repo[node2], pats, opts) |
|
391 | matcher = scmutil.match(repo[node2], pats, opts) | |
393 |
|
392 | |||
394 | if opts.get(b'patch'): |
|
393 | if opts.get(b'patch'): | |
395 | if subrepos: |
|
394 | if subrepos: | |
396 | raise error.Abort(_(b'--patch cannot be used with --subrepos')) |
|
395 | raise error.Abort(_(b'--patch cannot be used with --subrepos')) | |
397 | if perfile: |
|
396 | if perfile: | |
398 | raise error.Abort(_(b'--patch cannot be used with --per-file')) |
|
397 | raise error.Abort(_(b'--patch cannot be used with --per-file')) | |
399 | if node2 is None: |
|
398 | if node2 is None: | |
400 | raise error.Abort(_(b'--patch requires two revisions')) |
|
399 | raise error.Abort(_(b'--patch requires two revisions')) | |
401 | else: |
|
400 | else: | |
402 | st = repo.status(node1a, node2, matcher, listsubrepos=subrepos) |
|
401 | st = repo.status(node1a, node2, matcher, listsubrepos=subrepos) | |
403 | mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed) |
|
402 | mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed) | |
404 | if do3way: |
|
403 | if do3way: | |
405 | stb = repo.status(node1b, node2, matcher, listsubrepos=subrepos) |
|
404 | stb = repo.status(node1b, node2, matcher, listsubrepos=subrepos) | |
406 | mod_b, add_b, rem_b = ( |
|
405 | mod_b, add_b, rem_b = ( | |
407 | set(stb.modified), |
|
406 | set(stb.modified), | |
408 | set(stb.added), |
|
407 | set(stb.added), | |
409 | set(stb.removed), |
|
408 | set(stb.removed), | |
410 | ) |
|
409 | ) | |
411 | else: |
|
410 | else: | |
412 | mod_b, add_b, rem_b = set(), set(), set() |
|
411 | mod_b, add_b, rem_b = set(), set(), set() | |
413 | modadd = mod_a | add_a | mod_b | add_b |
|
412 | modadd = mod_a | add_a | mod_b | add_b | |
414 | common = modadd | rem_a | rem_b |
|
413 | common = modadd | rem_a | rem_b | |
415 | if not common: |
|
414 | if not common: | |
416 | return 0 |
|
415 | return 0 | |
417 |
|
416 | |||
418 | tmproot = pycompat.mkdtemp(prefix=b'extdiff.') |
|
417 | tmproot = pycompat.mkdtemp(prefix=b'extdiff.') | |
419 | try: |
|
418 | try: | |
420 | if not opts.get(b'patch'): |
|
419 | if not opts.get(b'patch'): | |
421 | # Always make a copy of node1a (and node1b, if applicable) |
|
420 | # Always make a copy of node1a (and node1b, if applicable) | |
422 | dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a) |
|
421 | dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a) | |
423 | dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot, subrepos)[ |
|
422 | dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot, subrepos)[ | |
424 | 0 |
|
423 | 0 | |
425 | ] |
|
424 | ] | |
426 | rev1a = b'@%d' % repo[node1a].rev() |
|
425 | rev1a = b'@%d' % repo[node1a].rev() | |
427 | if do3way: |
|
426 | if do3way: | |
428 | dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b) |
|
427 | dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b) | |
429 | dir1b = snapshot( |
|
428 | dir1b = snapshot( | |
430 | ui, repo, dir1b_files, node1b, tmproot, subrepos |
|
429 | ui, repo, dir1b_files, node1b, tmproot, subrepos | |
431 | )[0] |
|
430 | )[0] | |
432 | rev1b = b'@%d' % repo[node1b].rev() |
|
431 | rev1b = b'@%d' % repo[node1b].rev() | |
433 | else: |
|
432 | else: | |
434 | dir1b = None |
|
433 | dir1b = None | |
435 | rev1b = b'' |
|
434 | rev1b = b'' | |
436 |
|
435 | |||
437 | fnsandstat = [] |
|
436 | fnsandstat = [] | |
438 |
|
437 | |||
439 | # If node2 is not the wc or there is >1 change, copy it

438 | # If node2 is not the wc or there is >1 change, copy it |
440 | dir2root = b'' |
|
439 | dir2root = b'' | |
441 | rev2 = b'' |
|
440 | rev2 = b'' | |
442 | if node2: |
|
441 | if node2: | |
443 | dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0] |
|
442 | dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0] | |
444 | rev2 = b'@%d' % repo[node2].rev() |
|
443 | rev2 = b'@%d' % repo[node2].rev() | |
445 | elif len(common) > 1: |
|
444 | elif len(common) > 1: | |
446 | # we only actually need to get the files to copy back to |
|
445 | # we only actually need to get the files to copy back to | |
447 | # the working dir in this case (because the other cases |
|
446 | # the working dir in this case (because the other cases | |
448 | # are: diffing 2 revisions or single file -- in which case |
|
447 | # are: diffing 2 revisions or single file -- in which case | |
449 | # the file is already directly passed to the diff tool). |
|
448 | # the file is already directly passed to the diff tool). | |
450 | dir2, fnsandstat = snapshot( |
|
449 | dir2, fnsandstat = snapshot( | |
451 | ui, repo, modadd, None, tmproot, subrepos |
|
450 | ui, repo, modadd, None, tmproot, subrepos | |
452 | ) |
|
451 | ) | |
453 | else: |
|
452 | else: | |
454 | # This lets the diff tool open the changed file directly |
|
453 | # This lets the diff tool open the changed file directly | |
455 | dir2 = b'' |
|
454 | dir2 = b'' | |
456 | dir2root = repo.root |
|
455 | dir2root = repo.root | |
457 |
|
456 | |||
458 | label1a = rev1a |
|
457 | label1a = rev1a | |
459 | label1b = rev1b |
|
458 | label1b = rev1b | |
460 | label2 = rev2 |
|
459 | label2 = rev2 | |
461 |
|
460 | |||
462 | # If only one change, diff the files instead of the directories |
|
461 | # If only one change, diff the files instead of the directories | |
463 | # Handle bogus modifies correctly by checking if the files exist |
|
462 | # Handle bogus modifies correctly by checking if the files exist | |
464 | if len(common) == 1: |
|
463 | if len(common) == 1: | |
465 | common_file = util.localpath(common.pop()) |
|
464 | common_file = util.localpath(common.pop()) | |
466 | dir1a = os.path.join(tmproot, dir1a, common_file) |
|
465 | dir1a = os.path.join(tmproot, dir1a, common_file) | |
467 | label1a = common_file + rev1a |
|
466 | label1a = common_file + rev1a | |
468 | if not os.path.isfile(dir1a): |
|
467 | if not os.path.isfile(dir1a): | |
469 | dir1a = pycompat.osdevnull |
|
468 | dir1a = pycompat.osdevnull | |
470 | if do3way: |
|
469 | if do3way: | |
471 | dir1b = os.path.join(tmproot, dir1b, common_file) |
|
470 | dir1b = os.path.join(tmproot, dir1b, common_file) | |
472 | label1b = common_file + rev1b |
|
471 | label1b = common_file + rev1b | |
473 | if not os.path.isfile(dir1b): |
|
472 | if not os.path.isfile(dir1b): | |
474 | dir1b = pycompat.osdevnull |
|
473 | dir1b = pycompat.osdevnull | |
475 | dir2 = os.path.join(dir2root, dir2, common_file) |
|
474 | dir2 = os.path.join(dir2root, dir2, common_file) | |
476 | label2 = common_file + rev2 |
|
475 | label2 = common_file + rev2 | |
477 | else: |
|
476 | else: | |
478 | template = b'hg-%h.patch' |
|
477 | template = b'hg-%h.patch' | |
479 | with formatter.nullformatter(ui, b'extdiff', {}) as fm: |
|
478 | with formatter.nullformatter(ui, b'extdiff', {}) as fm: | |
480 | cmdutil.export( |
|
479 | cmdutil.export( | |
481 | repo, |
|
480 | repo, | |
482 | [repo[node1a].rev(), repo[node2].rev()], |
|
481 | [repo[node1a].rev(), repo[node2].rev()], | |
483 | fm, |
|
482 | fm, | |
484 | fntemplate=repo.vfs.reljoin(tmproot, template), |
|
483 | fntemplate=repo.vfs.reljoin(tmproot, template), | |
485 | match=matcher, |
|
484 | match=matcher, | |
486 | ) |
|
485 | ) | |
487 | label1a = cmdutil.makefilename(repo[node1a], template) |
|
486 | label1a = cmdutil.makefilename(repo[node1a], template) | |
488 | label2 = cmdutil.makefilename(repo[node2], template) |
|
487 | label2 = cmdutil.makefilename(repo[node2], template) | |
489 | dir1a = repo.vfs.reljoin(tmproot, label1a) |
|
488 | dir1a = repo.vfs.reljoin(tmproot, label1a) | |
490 | dir2 = repo.vfs.reljoin(tmproot, label2) |
|
489 | dir2 = repo.vfs.reljoin(tmproot, label2) | |
491 | dir1b = None |
|
490 | dir1b = None | |
492 | label1b = None |
|
491 | label1b = None | |
493 | fnsandstat = [] |
|
492 | fnsandstat = [] | |
494 |
|
493 | |||
495 | if not perfile: |
|
494 | if not perfile: | |
496 | # Run the external tool on the 2 temp directories or the patches |
|
495 | # Run the external tool on the 2 temp directories or the patches | |
497 | cmdline = formatcmdline( |
|
496 | cmdline = formatcmdline( | |
498 | cmdline, |
|
497 | cmdline, | |
499 | repo.root, |
|
498 | repo.root, | |
500 | do3way=do3way, |
|
499 | do3way=do3way, | |
501 | parent1=dir1a, |
|
500 | parent1=dir1a, | |
502 | plabel1=label1a, |
|
501 | plabel1=label1a, | |
503 | parent2=dir1b, |
|
502 | parent2=dir1b, | |
504 | plabel2=label1b, |
|
503 | plabel2=label1b, | |
505 | child=dir2, |
|
504 | child=dir2, | |
506 | clabel=label2, |
|
505 | clabel=label2, | |
507 | ) |
|
506 | ) | |
508 | ui.debug( |
|
507 | ui.debug( | |
509 | b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot) |
|
508 | b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot) | |
510 | ) |
|
509 | ) | |
511 | ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff') |
|
510 | ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff') | |
512 | else: |
|
511 | else: | |
513 | # Run the external tool once for each pair of files |
|
512 | # Run the external tool once for each pair of files | |
514 | _runperfilediff( |
|
513 | _runperfilediff( | |
515 | cmdline, |
|
514 | cmdline, | |
516 | repo.root, |
|
515 | repo.root, | |
517 | ui, |
|
516 | ui, | |
518 | guitool=guitool, |
|
517 | guitool=guitool, | |
519 | do3way=do3way, |
|
518 | do3way=do3way, | |
520 | confirm=confirm, |
|
519 | confirm=confirm, | |
521 | commonfiles=common, |
|
520 | commonfiles=common, | |
522 | tmproot=tmproot, |
|
521 | tmproot=tmproot, | |
523 | dir1a=dir1a, |
|
522 | dir1a=dir1a, | |
524 | dir1b=dir1b, |
|
523 | dir1b=dir1b, | |
525 | dir2root=dir2root, |
|
524 | dir2root=dir2root, | |
526 | dir2=dir2, |
|
525 | dir2=dir2, | |
527 | rev1a=rev1a, |
|
526 | rev1a=rev1a, | |
528 | rev1b=rev1b, |
|
527 | rev1b=rev1b, | |
529 | rev2=rev2, |
|
528 | rev2=rev2, | |
530 | ) |
|
529 | ) | |
531 |
|
530 | |||
532 | for copy_fn, working_fn, st in fnsandstat: |
|
531 | for copy_fn, working_fn, st in fnsandstat: | |
533 | cpstat = os.lstat(copy_fn) |
|
532 | cpstat = os.lstat(copy_fn) | |
534 | # Some tools copy the file and attributes, so mtime may not detect |
|
533 | # Some tools copy the file and attributes, so mtime may not detect | |
535 | # all changes. A size check will detect more cases, but not all. |
|
534 | # all changes. A size check will detect more cases, but not all. | |
536 | # The only certain way to detect every case is to diff all files, |
|
535 | # The only certain way to detect every case is to diff all files, | |
537 | # which could be expensive. |
|
536 | # which could be expensive. | |
538 | # copyfile() carries over the permission, so the mode check could |
|
537 | # copyfile() carries over the permission, so the mode check could | |
539 | # be in an 'elif' branch, but it is kept here for the case where the file has

538 | # be in an 'elif' branch, but it is kept here for the case where the file has |
540 | # changed without affecting mtime or size. |
|
539 | # changed without affecting mtime or size. | |
541 | if ( |
|
540 | if ( | |
542 | cpstat[stat.ST_MTIME] != st[stat.ST_MTIME] |
|
541 | cpstat[stat.ST_MTIME] != st[stat.ST_MTIME] | |
543 | or cpstat.st_size != st.st_size |
|
542 | or cpstat.st_size != st.st_size | |
544 | or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100) |
|
543 | or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100) | |
545 | ): |
|
544 | ): | |
546 | ui.debug( |
|
545 | ui.debug( | |
547 | b'file changed while diffing. ' |
|
546 | b'file changed while diffing. ' | |
548 | b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn) |
|
547 | b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn) | |
549 | ) |
|
548 | ) | |
550 | util.copyfile(copy_fn, working_fn) |
|
549 | util.copyfile(copy_fn, working_fn) | |
551 |
|
550 | |||
552 | return 1 |
|
551 | return 1 | |
553 | finally: |
|
552 | finally: | |
554 | ui.note(_(b'cleaning up temp directory\n')) |
|
553 | ui.note(_(b'cleaning up temp directory\n')) | |
555 | shutil.rmtree(tmproot) |
|
554 | shutil.rmtree(tmproot) | |
556 |
|
555 | |||
557 |
|
556 | |||
558 | extdiffopts = ( |
|
557 | extdiffopts = ( | |
559 | [ |
|
558 | [ | |
560 | ( |
|
559 | ( | |
561 | b'o', |
|
560 | b'o', | |
562 | b'option', |
|
561 | b'option', | |
563 | [], |
|
562 | [], | |
564 | _(b'pass option to comparison program'), |
|
563 | _(b'pass option to comparison program'), | |
565 | _(b'OPT'), |
|
564 | _(b'OPT'), | |
566 | ), |
|
565 | ), | |
567 | (b'r', b'rev', [], _(b'revision'), _(b'REV')), |
|
566 | (b'r', b'rev', [], _(b'revision'), _(b'REV')), | |
568 | (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')), |
|
567 | (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')), | |
569 | ( |
|
568 | ( | |
570 | b'', |
|
569 | b'', | |
571 | b'per-file', |
|
570 | b'per-file', | |
572 | False, |
|
571 | False, | |
573 | _(b'compare each file instead of revision snapshots'), |
|
572 | _(b'compare each file instead of revision snapshots'), | |
574 | ), |
|
573 | ), | |
575 | ( |
|
574 | ( | |
576 | b'', |
|
575 | b'', | |
577 | b'confirm', |
|
576 | b'confirm', | |
578 | False, |
|
577 | False, | |
579 | _(b'prompt user before each external program invocation'), |
|
578 | _(b'prompt user before each external program invocation'), | |
580 | ), |
|
579 | ), | |
581 | (b'', b'patch', None, _(b'compare patches for two revisions')), |
|
580 | (b'', b'patch', None, _(b'compare patches for two revisions')), | |
582 | ] |
|
581 | ] | |
583 | + cmdutil.walkopts |
|
582 | + cmdutil.walkopts | |
584 | + cmdutil.subrepoopts |
|
583 | + cmdutil.subrepoopts | |
585 | ) |
|
584 | ) | |
586 |
|
585 | |||
587 |
|
586 | |||
588 | @command( |
|
587 | @command( | |
589 | b'extdiff', |
|
588 | b'extdiff', | |
590 | [(b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),] |
|
589 | [(b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),] | |
591 | + extdiffopts, |
|
590 | + extdiffopts, | |
592 | _(b'hg extdiff [OPT]... [FILE]...'), |
|
591 | _(b'hg extdiff [OPT]... [FILE]...'), | |
593 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
592 | helpcategory=command.CATEGORY_FILE_CONTENTS, | |
594 | inferrepo=True, |
|
593 | inferrepo=True, | |
595 | ) |
|
594 | ) | |
596 | def extdiff(ui, repo, *pats, **opts): |
|
595 | def extdiff(ui, repo, *pats, **opts): | |
597 | '''use external program to diff repository (or selected files) |
|
596 | '''use external program to diff repository (or selected files) | |
598 |
|
597 | |||
599 | Show differences between revisions for the specified files, using |
|
598 | Show differences between revisions for the specified files, using | |
600 | an external program. The default program used is diff, with |
|
599 | an external program. The default program used is diff, with | |
601 | default options "-Npru". |
|
600 | default options "-Npru". | |
602 |
|
601 | |||
603 | To select a different program, use the -p/--program option. The |
|
602 | To select a different program, use the -p/--program option. The | |
604 | program will be passed the names of two directories to compare, |
|
603 | program will be passed the names of two directories to compare, | |
605 | unless the --per-file option is specified (see below). To pass |
|
604 | unless the --per-file option is specified (see below). To pass | |
606 | additional options to the program, use -o/--option. These will be |
|
605 | additional options to the program, use -o/--option. These will be | |
607 | passed before the names of the directories or files to compare. |
|
606 | passed before the names of the directories or files to compare. | |
608 |
|
607 | |||
609 | When two revision arguments are given, then changes are shown |
|
608 | When two revision arguments are given, then changes are shown | |
610 | between those revisions. If only one revision is specified then |
|
609 | between those revisions. If only one revision is specified then | |
611 | that revision is compared to the working directory, and, when no |
|
610 | that revision is compared to the working directory, and, when no | |
612 | revisions are specified, the working directory files are compared |
|
611 | revisions are specified, the working directory files are compared | |
613 | to its parent. |
|
612 | to its parent. | |
614 |
|
613 | |||
615 | The --per-file option runs the external program repeatedly on each |
|
614 | The --per-file option runs the external program repeatedly on each | |
616 | file to diff, instead of once on two directories. By default, |
|
615 | file to diff, instead of once on two directories. By default, | |
617 | this happens one by one, where the next file diff is open in the |
|
616 | this happens one by one, where the next file diff is open in the | |
618 | external program only once the previous external program (for the |
|
617 | external program only once the previous external program (for the | |
619 | previous file diff) has exited. If the external program has a |
|
618 | previous file diff) has exited. If the external program has a | |
620 | graphical interface, it can open all the file diffs at once instead |
|
619 | graphical interface, it can open all the file diffs at once instead | |
621 | of one by one. See :hg:`help -e extdiff` for information about how |
|
620 | of one by one. See :hg:`help -e extdiff` for information about how | |
622 | to tell Mercurial that a given program has a graphical interface. |
|
621 | to tell Mercurial that a given program has a graphical interface. | |
623 |
|
622 | |||
624 | The --confirm option will prompt the user before each invocation of |
|
623 | The --confirm option will prompt the user before each invocation of | |
625 | the external program. It is ignored if --per-file isn't specified. |
|
624 | the external program. It is ignored if --per-file isn't specified. | |
626 | ''' |
|
625 | ''' | |
627 | opts = pycompat.byteskwargs(opts) |
|
626 | opts = pycompat.byteskwargs(opts) | |
628 | program = opts.get(b'program') |
|
627 | program = opts.get(b'program') | |
629 | option = opts.get(b'option') |
|
628 | option = opts.get(b'option') | |
630 | if not program: |
|
629 | if not program: | |
631 | program = b'diff' |
|
630 | program = b'diff' | |
632 | option = option or [b'-Npru'] |
|
631 | option = option or [b'-Npru'] | |
633 | cmdline = b' '.join(map(procutil.shellquote, [program] + option)) |
|
632 | cmdline = b' '.join(map(procutil.shellquote, [program] + option)) | |
634 | return dodiff(ui, repo, cmdline, pats, opts) |
|
633 | return dodiff(ui, repo, cmdline, pats, opts) | |
635 |
|
634 | |||
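# A few representative invocations of the command defined above (tool names
# are examples only; any external diff program on $PATH can be used):
#
#   hg extdiff                               # diff -Npru of working dir vs. its parent
#   hg extdiff -p meld -r 1.0 -r 1.5         # meld on snapshots of two revisions
#   hg extdiff -p kdiff3 -c 42               # changes made by revision 42
#   hg extdiff -p meld --per-file --confirm  # one tool run per file, with prompting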
636 |
|
635 | |||
637 | class savedcmd(object): |
|
636 | class savedcmd(object): | |
638 | """use external program to diff repository (or selected files) |
|
637 | """use external program to diff repository (or selected files) | |
639 |
|
638 | |||
640 | Show differences between revisions for the specified files, using |
|
639 | Show differences between revisions for the specified files, using | |
641 | the following program:: |
|
640 | the following program:: | |
642 |
|
641 | |||
643 | %(path)s |
|
642 | %(path)s | |
644 |
|
643 | |||
645 | When two revision arguments are given, then changes are shown |
|
644 | When two revision arguments are given, then changes are shown | |
646 | between those revisions. If only one revision is specified then |
|
645 | between those revisions. If only one revision is specified then | |
647 | that revision is compared to the working directory, and, when no |
|
646 | that revision is compared to the working directory, and, when no | |
648 | revisions are specified, the working directory files are compared |
|
647 | revisions are specified, the working directory files are compared | |
649 | to its parent. |
|
648 | to its parent. | |
650 | """ |
|
649 | """ | |
651 |
|
650 | |||
652 | def __init__(self, path, cmdline, isgui): |
|
651 | def __init__(self, path, cmdline, isgui): | |
653 | # We can't pass non-ASCII through docstrings (and path is |
|
652 | # We can't pass non-ASCII through docstrings (and path is | |
654 | # in an unknown encoding anyway), but avoid double separators on |
|
653 | # in an unknown encoding anyway), but avoid double separators on | |
655 | # Windows |
|
654 | # Windows | |
656 | docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\') |
|
655 | docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\') | |
657 | self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))} |
|
656 | self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))} | |
658 | self._cmdline = cmdline |
|
657 | self._cmdline = cmdline | |
659 | self._isgui = isgui |
|
658 | self._isgui = isgui | |
660 |
|
659 | |||
661 | def __call__(self, ui, repo, *pats, **opts): |
|
660 | def __call__(self, ui, repo, *pats, **opts): | |
662 | opts = pycompat.byteskwargs(opts) |
|
661 | opts = pycompat.byteskwargs(opts) | |
663 | options = b' '.join(map(procutil.shellquote, opts[b'option'])) |
|
662 | options = b' '.join(map(procutil.shellquote, opts[b'option'])) | |
664 | if options: |
|
663 | if options: | |
665 | options = b' ' + options |
|
664 | options = b' ' + options | |
666 | return dodiff( |
|
665 | return dodiff( | |
667 | ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui |
|
666 | ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui | |
668 | ) |
|
667 | ) | |
669 |
|
668 | |||
670 |
|
669 | |||
671 | def uisetup(ui): |
|
670 | def uisetup(ui): | |
672 | for cmd, path in ui.configitems(b'extdiff'): |
|
671 | for cmd, path in ui.configitems(b'extdiff'): | |
673 | path = util.expandpath(path) |
|
672 | path = util.expandpath(path) | |
674 | if cmd.startswith(b'cmd.'): |
|
673 | if cmd.startswith(b'cmd.'): | |
675 | cmd = cmd[4:] |
|
674 | cmd = cmd[4:] | |
676 | if not path: |
|
675 | if not path: | |
677 | path = procutil.findexe(cmd) |
|
676 | path = procutil.findexe(cmd) | |
678 | if path is None: |
|
677 | if path is None: | |
679 | path = filemerge.findexternaltool(ui, cmd) or cmd |
|
678 | path = filemerge.findexternaltool(ui, cmd) or cmd | |
680 | diffopts = ui.config(b'extdiff', b'opts.' + cmd) |
|
679 | diffopts = ui.config(b'extdiff', b'opts.' + cmd) | |
681 | cmdline = procutil.shellquote(path) |
|
680 | cmdline = procutil.shellquote(path) | |
682 | if diffopts: |
|
681 | if diffopts: | |
683 | cmdline += b' ' + diffopts |
|
682 | cmdline += b' ' + diffopts | |
684 | isgui = ui.configbool(b'extdiff', b'gui.' + cmd) |
|
683 | isgui = ui.configbool(b'extdiff', b'gui.' + cmd) | |
685 | elif cmd.startswith(b'opts.') or cmd.startswith(b'gui.'): |
|
684 | elif cmd.startswith(b'opts.') or cmd.startswith(b'gui.'): | |
686 | continue |
|
685 | continue | |
687 | else: |
|
686 | else: | |
688 | if path: |
|
687 | if path: | |
689 | # case "cmd = path opts" |
|
688 | # case "cmd = path opts" | |
690 | cmdline = path |
|
689 | cmdline = path | |
691 | diffopts = len(pycompat.shlexsplit(cmdline)) > 1 |
|
690 | diffopts = len(pycompat.shlexsplit(cmdline)) > 1 | |
692 | else: |
|
691 | else: | |
693 | # case "cmd =" |
|
692 | # case "cmd =" | |
694 | path = procutil.findexe(cmd) |
|
693 | path = procutil.findexe(cmd) | |
695 | if path is None: |
|
694 | if path is None: | |
696 | path = filemerge.findexternaltool(ui, cmd) or cmd |
|
695 | path = filemerge.findexternaltool(ui, cmd) or cmd | |
697 | cmdline = procutil.shellquote(path) |
|
696 | cmdline = procutil.shellquote(path) | |
698 | diffopts = False |
|
697 | diffopts = False | |
699 | isgui = ui.configbool(b'extdiff', b'gui.' + cmd) |
|
698 | isgui = ui.configbool(b'extdiff', b'gui.' + cmd) | |
700 | # look for diff arguments in [diff-tools] then [merge-tools] |
|
699 | # look for diff arguments in [diff-tools] then [merge-tools] | |
701 | if not diffopts: |
|
700 | if not diffopts: | |
702 | key = cmd + b'.diffargs' |
|
701 | key = cmd + b'.diffargs' | |
703 | for section in (b'diff-tools', b'merge-tools'): |
|
702 | for section in (b'diff-tools', b'merge-tools'): | |
704 | args = ui.config(section, key) |
|
703 | args = ui.config(section, key) | |
705 | if args: |
|
704 | if args: | |
706 | cmdline += b' ' + args |
|
705 | cmdline += b' ' + args | |
707 | if isgui is None: |
|
706 | if isgui is None: | |
708 | isgui = ui.configbool(section, cmd + b'.gui') or False |
|
707 | isgui = ui.configbool(section, cmd + b'.gui') or False | |
709 | break |
|
708 | break | |
710 | command( |
|
709 | command( | |
711 | cmd, |
|
710 | cmd, | |
712 | extdiffopts[:], |
|
711 | extdiffopts[:], | |
713 | _(b'hg %s [OPTION]... [FILE]...') % cmd, |
|
712 | _(b'hg %s [OPTION]... [FILE]...') % cmd, | |
714 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
713 | helpcategory=command.CATEGORY_FILE_CONTENTS, | |
715 | inferrepo=True, |
|
714 | inferrepo=True, | |
716 | )(savedcmd(path, cmdline, isgui)) |
|
715 | )(savedcmd(path, cmdline, isgui)) | |
717 |
|
716 | |||
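# Sketch of the mapping performed above (the entries are illustrative):
#
#   [extdiff]
#   meld =                ->  registers "hg meld"; the executable is located
#                             via findexe() / findexternaltool()
#   cdiff = gdiff -Nprc5  ->  registers "hg cdiff" running that exact command line
#   cmd.vim = gvimdiff    ->  registers "hg vim"; extdiff.opts.vim and
#                             extdiff.gui.vim, if set, supply extra arguments
#                             and the GUI flag
#
# When no explicit diff arguments are configured, <tool>.diffargs and
# <tool>.gui are then looked up under [diff-tools] and [merge-tools].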
718 |
|
717 | |||
719 | # tell hggettext to extract docstrings from these functions: |
|
718 | # tell hggettext to extract docstrings from these functions: | |
720 | i18nfunctions = [savedcmd] |
|
719 | i18nfunctions = [savedcmd] |
@@ -1,718 +1,718 b'' | |||||
1 | # chgserver.py - command server extension for cHg |
|
1 | # chgserver.py - command server extension for cHg | |
2 | # |
|
2 | # | |
3 | # Copyright 2011 Yuya Nishihara <yuya@tcha.org> |
|
3 | # Copyright 2011 Yuya Nishihara <yuya@tcha.org> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | """command server extension for cHg |
|
8 | """command server extension for cHg | |
9 |
|
9 | |||
10 | 'S' channel (read/write) |
|
10 | 'S' channel (read/write) | |
11 | propagate ui.system() request to client |
|
11 | propagate ui.system() request to client | |
12 |
|
12 | |||
13 | 'attachio' command |
|
13 | 'attachio' command | |
14 | attach client's stdio passed by sendmsg() |
|
14 | attach client's stdio passed by sendmsg() | |
15 |
|
15 | |||
16 | 'chdir' command |
|
16 | 'chdir' command | |
17 | change current directory |
|
17 | change current directory | |
18 |
|
18 | |||
19 | 'setenv' command |
|
19 | 'setenv' command | |
20 | replace os.environ completely |
|
20 | replace os.environ completely | |
21 |
|
21 | |||
22 | 'setumask' command (DEPRECATED) |
|
22 | 'setumask' command (DEPRECATED) | |
23 | 'setumask2' command |
|
23 | 'setumask2' command | |
24 | set umask |
|
24 | set umask | |
25 |
|
25 | |||
26 | 'validate' command |
|
26 | 'validate' command | |
27 | reload the config and check if the server is up to date |
|
27 | reload the config and check if the server is up to date | |
28 |
|
28 | |||
29 | Config |
|
29 | Config | |
30 | ------ |
|
30 | ------ | |
31 |
|
31 | |||
32 | :: |
|
32 | :: | |
33 |
|
33 | |||
34 | [chgserver] |
|
34 | [chgserver] | |
35 | # how long (in seconds) an idle chg server waits before exiting

35 | # how long (in seconds) an idle chg server waits before exiting |
36 | idletimeout = 3600 |
|
36 | idletimeout = 3600 | |
37 |
|
37 | |||
38 | # whether to skip config or env change checks |
|
38 | # whether to skip config or env change checks | |
39 | skiphash = False |
|
39 | skiphash = False | |
40 | """ |
|
40 | """ | |
41 |
|
41 | |||
42 | from __future__ import absolute_import |
|
42 | from __future__ import absolute_import | |
43 |
|
43 | |||
44 | import inspect |
|
44 | import inspect | |
45 | import os |
|
45 | import os | |
46 | import re |
|
46 | import re | |
47 | import socket |
|
47 | import socket | |
48 | import stat |
|
48 | import stat | |
49 | import struct |
|
49 | import struct | |
50 | import time |
|
50 | import time | |
51 |
|
51 | |||
52 | from .i18n import _ |
|
52 | from .i18n import _ | |
53 | from .pycompat import ( |
|
53 | from .pycompat import ( | |
54 | getattr, |
|
54 | getattr, | |
55 | setattr, |
|
55 | setattr, | |
56 | ) |
|
56 | ) | |
57 |
|
57 | |||
58 | from . import ( |
|
58 | from . import ( | |
59 | commandserver, |
|
59 | commandserver, | |
60 | encoding, |
|
60 | encoding, | |
61 | error, |
|
61 | error, | |
62 | extensions, |
|
62 | extensions, | |
63 | node, |
|
63 | node, | |
64 | pycompat, |
|
64 | pycompat, | |
65 | util, |
|
65 | util, | |
66 | ) |
|
66 | ) | |
67 |
|
67 | |||
68 | from .utils import ( |
|
68 | from .utils import ( | |
69 | hashutil, |
|
69 | hashutil, | |
70 | procutil, |
|
70 | procutil, | |
71 | stringutil, |
|
71 | stringutil, | |
72 | ) |
|
72 | ) | |
73 |
|
73 | |||
74 |
|
74 | |||
75 | def _hashlist(items): |
|
75 | def _hashlist(items): | |
76 | """return sha1 hexdigest for a list""" |
|
76 | """return sha1 hexdigest for a list""" | |
77 | return node.hex(hashutil.sha1(stringutil.pprint(items)).digest()) |
|
77 | return node.hex(hashutil.sha1(stringutil.pprint(items)).digest()) | |
78 |
|
78 | |||
79 |
|
79 | |||
80 | # sensitive config sections affecting confighash |
|
80 | # sensitive config sections affecting confighash | |
81 | _configsections = [ |
|
81 | _configsections = [ | |
82 | b'alias', # affects global state commands.table |
|
82 | b'alias', # affects global state commands.table | |
83 | b'diff-tools', # affects whether gui or not in extdiff's uisetup |
|
83 | b'diff-tools', # affects whether gui or not in extdiff's uisetup | |
84 | b'eol', # uses setconfig('eol', ...) |
|
84 | b'eol', # uses setconfig('eol', ...) | |
85 | b'extdiff', # uisetup will register new commands |
|
85 | b'extdiff', # uisetup will register new commands | |
86 | b'extensions', |
|
86 | b'extensions', | |
87 | b'fastannotate', # affects annotate command and adds fastannotate cmd

87 | b'fastannotate', # affects annotate command and adds fastannotate cmd |
88 | b'merge-tools', # affects whether gui or not in extdiff's uisetup |
|
88 | b'merge-tools', # affects whether gui or not in extdiff's uisetup | |
89 | b'schemes', # extsetup will update global hg.schemes |
|
89 | b'schemes', # extsetup will update global hg.schemes | |
90 | ] |
|
90 | ] | |
91 |
|
91 | |||
92 | _configsectionitems = [ |
|
92 | _configsectionitems = [ | |
93 | (b'commands', b'show.aliasprefix'), # show.py reads it in extsetup |
|
93 | (b'commands', b'show.aliasprefix'), # show.py reads it in extsetup | |
94 | ] |
|
94 | ] | |
95 |
|
95 | |||
96 | # sensitive environment variables affecting confighash |
|
96 | # sensitive environment variables affecting confighash | |
97 | _envre = re.compile( |
|
97 | _envre = re.compile( | |
98 | br'''\A(?: |
|
98 | br'''\A(?: | |
99 | CHGHG |
|
99 | CHGHG | |
100 | |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)? |
|
100 | |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)? | |
101 | |HG(?:ENCODING|PLAIN).* |
|
101 | |HG(?:ENCODING|PLAIN).* | |
102 | |LANG(?:UAGE)? |
|
102 | |LANG(?:UAGE)? | |
103 | |LC_.* |
|
103 | |LC_.* | |
104 | |LD_.* |
|
104 | |LD_.* | |
105 | |PATH |
|
105 | |PATH | |
106 | |PYTHON.* |
|
106 | |PYTHON.* | |
107 | |TERM(?:INFO)? |
|
107 | |TERM(?:INFO)? | |
108 | |TZ |
|
108 | |TZ | |
109 | )\Z''', |
|
109 | )\Z''', | |
110 | re.X, |
|
110 | re.X, | |
111 | ) |
|
111 | ) | |
112 |
|
112 | |||
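# For example, HGPLAIN, HGENCODING, LC_ALL, LD_LIBRARY_PATH, PATH, TERM and TZ
# all match this pattern and therefore contribute to confighash, while
# variables such as HOME or EDITOR do not.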
113 |
|
113 | |||
114 | def _confighash(ui): |
|
114 | def _confighash(ui): | |
115 | """return a quick hash for detecting config/env changes |
|
115 | """return a quick hash for detecting config/env changes | |
116 |
|
116 | |||
117 | confighash is the hash of sensitive config items and environment variables. |
|
117 | confighash is the hash of sensitive config items and environment variables. | |
118 |
|
118 | |||
119 | for chgserver, it is designed that once confighash changes, the server is |
|
119 | for chgserver, it is designed that once confighash changes, the server is | |
120 | not qualified to serve its client and should redirect the client to a new |
|
120 | not qualified to serve its client and should redirect the client to a new | |
121 | server. different from mtimehash, confighash change will not mark the |
|
121 | server. different from mtimehash, confighash change will not mark the | |
122 | server outdated and exit since the user can have different configs at the |
|
122 | server outdated and exit since the user can have different configs at the | |
123 | same time. |
|
123 | same time. | |
124 | """ |
|
124 | """ | |
125 | sectionitems = [] |
|
125 | sectionitems = [] | |
126 | for section in _configsections: |
|
126 | for section in _configsections: | |
127 | sectionitems.append(ui.configitems(section)) |
|
127 | sectionitems.append(ui.configitems(section)) | |
128 | for section, item in _configsectionitems: |
|
128 | for section, item in _configsectionitems: | |
129 | sectionitems.append(ui.config(section, item)) |
|
129 | sectionitems.append(ui.config(section, item)) | |
130 | sectionhash = _hashlist(sectionitems) |
|
130 | sectionhash = _hashlist(sectionitems) | |
131 | # If $CHGHG is set, the change to $HG should not trigger a new chg server |
|
131 | # If $CHGHG is set, the change to $HG should not trigger a new chg server | |
132 | if b'CHGHG' in encoding.environ: |
|
132 | if b'CHGHG' in encoding.environ: | |
133 | ignored = {b'HG'} |
|
133 | ignored = {b'HG'} | |
134 | else: |
|
134 | else: | |
135 | ignored = set() |
|
135 | ignored = set() | |
136 | envitems = [ |
|
136 | envitems = [ | |
137 | (k, v) |
|
137 | (k, v) | |
138 | for k, v in pycompat.iteritems(encoding.environ) |
|
138 | for k, v in pycompat.iteritems(encoding.environ) | |
139 | if _envre.match(k) and k not in ignored |
|
139 | if _envre.match(k) and k not in ignored | |
140 | ] |
|
140 | ] | |
141 | envhash = _hashlist(sorted(envitems)) |
|
141 | envhash = _hashlist(sorted(envitems)) | |
142 | return sectionhash[:6] + envhash[:6] |
|
142 | return sectionhash[:6] + envhash[:6] | |
143 |
|
143 | |||
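# Rough illustration (hash values invented): if the sensitive config sections
# hash to b'9f86d081...' and the environment items to b'3b0c4428...', the
# resulting confighash is b'9f86d0' + b'3b0c44' == b'9f86d03b0c44', a stable
# 12-character token that changes whenever either side changes.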
144 |
|
144 | |||
145 | def _getmtimepaths(ui): |
|
145 | def _getmtimepaths(ui): | |
146 | """get a list of paths that should be checked to detect change |
|
146 | """get a list of paths that should be checked to detect change | |
147 |
|
147 | |||
148 | The list will include: |
|
148 | The list will include: | |
149 | - extensions (will not cover all files for complex extensions) |
|
149 | - extensions (will not cover all files for complex extensions) | |
150 | - mercurial/__version__.py |
|
150 | - mercurial/__version__.py | |
151 | - python binary |
|
151 | - python binary | |
152 | """ |
|
152 | """ | |
153 | modules = [m for n, m in extensions.extensions(ui)] |
|
153 | modules = [m for n, m in extensions.extensions(ui)] | |
154 | try: |
|
154 | try: | |
155 | from . import __version__ |
|
155 | from . import __version__ | |
156 |
|
156 | |||
157 | modules.append(__version__) |
|
157 | modules.append(__version__) | |
158 | except ImportError: |
|
158 | except ImportError: | |
159 | pass |
|
159 | pass | |
160 | files = [] |
|
160 | files = [] | |
161 | if pycompat.sysexecutable: |
|
161 | if pycompat.sysexecutable: | |
162 | files.append(pycompat.sysexecutable) |
|
162 | files.append(pycompat.sysexecutable) | |
163 | for m in modules: |
|
163 | for m in modules: | |
164 | try: |
|
164 | try: | |
165 | files.append(pycompat.fsencode(inspect.getabsfile(m))) |
|
165 | files.append(pycompat.fsencode(inspect.getabsfile(m))) | |
166 | except TypeError: |
|
166 | except TypeError: | |
167 | pass |
|
167 | pass | |
168 | return sorted(set(files)) |
|
168 | return sorted(set(files)) | |
169 |
|
169 | |||
170 |
|
170 | |||
171 | def _mtimehash(paths): |
|
171 | def _mtimehash(paths): | |
172 | """return a quick hash for detecting file changes |
|
172 | """return a quick hash for detecting file changes | |
173 |
|
173 | |||
174 | mtimehash calls stat on given paths and calculates a hash based on size and

174 | mtimehash calls stat on given paths and calculates a hash based on size and |
175 | mtime of each file. mtimehash does not read file content because reading is |
|
175 | mtime of each file. mtimehash does not read file content because reading is | |
176 | expensive. therefore it's not 100% reliable for detecting content changes. |
|
176 | expensive. therefore it's not 100% reliable for detecting content changes. | |
177 | it's possible to return different hashes for same file contents. |
|
177 | it's possible to return different hashes for same file contents. | |
178 | it's also possible to return a same hash for different file contents for |
|
178 | it's also possible to return a same hash for different file contents for | |
179 | some carefully crafted situation. |
|
179 | some carefully crafted situation. | |
180 |
|
180 | |||
181 | for chgserver, it is designed that once mtimehash changes, the server is |
|
181 | for chgserver, it is designed that once mtimehash changes, the server is | |
182 | considered outdated immediately and should no longer provide service. |
|
182 | considered outdated immediately and should no longer provide service. | |
183 |
|
183 | |||
184 | mtimehash is not included in confighash because we only know the paths of |
|
184 | mtimehash is not included in confighash because we only know the paths of | |
185 | extensions after importing them (there is imp.find_module but that faces |
|
185 | extensions after importing them (there is imp.find_module but that faces | |
186 | race conditions). We need to calculate confighash without importing. |
|
186 | race conditions). We need to calculate confighash without importing. | |
187 | """ |
|
187 | """ | |
188 |
|
188 | |||
189 | def trystat(path): |
|
189 | def trystat(path): | |
190 | try: |
|
190 | try: | |
191 | st = os.stat(path) |
|
191 | st = os.stat(path) | |
192 | return (st[stat.ST_MTIME], st.st_size) |
|
192 | return (st[stat.ST_MTIME], st.st_size) | |
193 | except OSError: |
|
193 | except OSError: | |
194 | # could be ENOENT, EPERM etc. not fatal in any case |
|
194 | # could be ENOENT, EPERM etc. not fatal in any case | |
195 | pass |
|
195 | pass | |
196 |
|
196 | |||
197 | return _hashlist(pycompat.maplist(trystat, paths))[:12] |
|
197 | return _hashlist(pycompat.maplist(trystat, paths))[:12] | |
198 |
|
198 | |||
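# In practice the paths hashed here are the Python executable,
# mercurial/__version__.py and the source file of each loaded extension (see
# _getmtimepaths above). trystat() maps a missing file to None, which still
# hashes deterministically, so removing or reinstalling any of these files
# changes the 12-character mtimehash and the running server marks itself
# outdated.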
199 |
|
199 | |||
200 | class hashstate(object): |
|
200 | class hashstate(object): | |
201 | """a structure storing confighash, mtimehash, paths used for mtimehash""" |
|
201 | """a structure storing confighash, mtimehash, paths used for mtimehash""" | |
202 |
|
202 | |||
203 | def __init__(self, confighash, mtimehash, mtimepaths): |
|
203 | def __init__(self, confighash, mtimehash, mtimepaths): | |
204 | self.confighash = confighash |
|
204 | self.confighash = confighash | |
205 | self.mtimehash = mtimehash |
|
205 | self.mtimehash = mtimehash | |
206 | self.mtimepaths = mtimepaths |
|
206 | self.mtimepaths = mtimepaths | |
207 |
|
207 | |||
208 | @staticmethod |
|
208 | @staticmethod | |
209 | def fromui(ui, mtimepaths=None): |
|
209 | def fromui(ui, mtimepaths=None): | |
210 | if mtimepaths is None: |
|
210 | if mtimepaths is None: | |
211 | mtimepaths = _getmtimepaths(ui) |
|
211 | mtimepaths = _getmtimepaths(ui) | |
212 | confighash = _confighash(ui) |
|
212 | confighash = _confighash(ui) | |
213 | mtimehash = _mtimehash(mtimepaths) |
|
213 | mtimehash = _mtimehash(mtimepaths) | |
214 | ui.log( |
|
214 | ui.log( | |
215 | b'cmdserver', |
|
215 | b'cmdserver', | |
216 | b'confighash = %s mtimehash = %s\n', |
|
216 | b'confighash = %s mtimehash = %s\n', | |
217 | confighash, |
|
217 | confighash, | |
218 | mtimehash, |
|
218 | mtimehash, | |
219 | ) |
|
219 | ) | |
220 | return hashstate(confighash, mtimehash, mtimepaths) |
|
220 | return hashstate(confighash, mtimehash, mtimepaths) | |
221 |
|
221 | |||
222 |
|
222 | |||
223 | def _newchgui(srcui, csystem, attachio): |
|
223 | def _newchgui(srcui, csystem, attachio): | |
224 | class chgui(srcui.__class__): |
|
224 | class chgui(srcui.__class__): | |
225 | def __init__(self, src=None): |
|
225 | def __init__(self, src=None): | |
226 | super(chgui, self).__init__(src) |
|
226 | super(chgui, self).__init__(src) | |
227 | if src: |
|
227 | if src: | |
228 | self._csystem = getattr(src, '_csystem', csystem) |
|
228 | self._csystem = getattr(src, '_csystem', csystem) | |
229 | else: |
|
229 | else: | |
230 | self._csystem = csystem |
|
230 | self._csystem = csystem | |
231 |
|
231 | |||
232 | def _runsystem(self, cmd, environ, cwd, out): |
|
232 | def _runsystem(self, cmd, environ, cwd, out): | |
233 | # fallback to the original system method if |
|
233 | # fallback to the original system method if | |
234 | # a. the output stream is not stdout (e.g. stderr, cStringIO), |
|
234 | # a. the output stream is not stdout (e.g. stderr, cStringIO), | |
235 | # b. or stdout is redirected by protectfinout(), |
|
235 | # b. or stdout is redirected by protectfinout(), | |
236 | # because the chg client is not aware of these situations and |
|
236 | # because the chg client is not aware of these situations and | |
237 | # will behave differently (i.e. write to stdout). |
|
237 | # will behave differently (i.e. write to stdout). | |
238 | if ( |
|
238 | if ( | |
239 | out is not self.fout |
|
239 | out is not self.fout | |
240 | or not util.safehasattr(self.fout, b'fileno') |
|
240 | or not util.safehasattr(self.fout, b'fileno') | |
241 | or self.fout.fileno() != procutil.stdout.fileno() |
|
241 | or self.fout.fileno() != procutil.stdout.fileno() | |
242 | or self._finoutredirected |
|
242 | or self._finoutredirected | |
243 | ): |
|
243 | ): | |
244 | return procutil.system(cmd, environ=environ, cwd=cwd, out=out) |
|
244 | return procutil.system(cmd, environ=environ, cwd=cwd, out=out) | |
245 | self.flush() |
|
245 | self.flush() | |
246 | return self._csystem(cmd, procutil.shellenviron(environ), cwd) |
|
246 | return self._csystem(cmd, procutil.shellenviron(environ), cwd) | |
247 |
|
247 | |||
248 | def _runpager(self, cmd, env=None): |
|
248 | def _runpager(self, cmd, env=None): | |
249 | self._csystem( |
|
249 | self._csystem( | |
250 | cmd, |
|
250 | cmd, | |
251 | procutil.shellenviron(env), |
|
251 | procutil.shellenviron(env), | |
252 | type=b'pager', |
|
252 | type=b'pager', | |
253 | cmdtable={b'attachio': attachio}, |
|
253 | cmdtable={b'attachio': attachio}, | |
254 | ) |
|
254 | ) | |
255 | return True |
|
255 | return True | |
256 |
|
256 | |||
257 | return chgui(srcui) |
|
257 | return chgui(srcui) | |
258 |
|
258 | |||
259 |
|
259 | |||
260 | def _loadnewui(srcui, args, cdebug): |
|
260 | def _loadnewui(srcui, args, cdebug): | |
261 | from . import dispatch # avoid cycle |
|
261 | from . import dispatch # avoid cycle | |
262 |
|
262 | |||
263 | newui = srcui.__class__.load() |
|
263 | newui = srcui.__class__.load() | |
264 | for a in [b'fin', b'fout', b'ferr', b'environ']: |
|
264 | for a in [b'fin', b'fout', b'ferr', b'environ']: | |
265 | setattr(newui, a, getattr(srcui, a)) |
|
265 | setattr(newui, a, getattr(srcui, a)) | |
266 | if util.safehasattr(srcui, b'_csystem'): |
|
266 | if util.safehasattr(srcui, b'_csystem'): | |
267 | newui._csystem = srcui._csystem |
|
267 | newui._csystem = srcui._csystem | |
268 |
|
268 | |||
269 | # command line args |
|
269 | # command line args | |
270 | options = dispatch._earlyparseopts(newui, args) |
|
270 | options = dispatch._earlyparseopts(newui, args) | |
271 | dispatch._parseconfig(newui, options[b'config']) |
|
271 | dispatch._parseconfig(newui, options[b'config']) | |
272 |
|
272 | |||
273 | # stolen from tortoisehg.util.copydynamicconfig() |
|
273 | # stolen from tortoisehg.util.copydynamicconfig() | |
274 | for section, name, value in srcui.walkconfig(): |
|
274 | for section, name, value in srcui.walkconfig(): | |
275 | source = srcui.configsource(section, name) |
|
275 | source = srcui.configsource(section, name) | |
276 | if b':' in source or source == b'--config' or source.startswith(b'$'): |
|
276 | if b':' in source or source == b'--config' or source.startswith(b'$'): | |
277 | # path:line or command line, or environ |
|
277 | # path:line or command line, or environ | |
278 | continue |
|
278 | continue | |
279 | newui.setconfig(section, name, value, source) |
|
279 | newui.setconfig(section, name, value, source) | |
280 |
|
280 | |||
281 | # load wd and repo config, copied from dispatch.py |
|
281 | # load wd and repo config, copied from dispatch.py | |
282 | cwd = options[b'cwd'] |
|
282 | cwd = options[b'cwd'] | |
283 | cwd = cwd and os.path.realpath(cwd) or None |
|
283 | cwd = cwd and os.path.realpath(cwd) or None | |
284 | rpath = options[b'repository'] |
|
284 | rpath = options[b'repository'] | |
285 | path, newlui = dispatch._getlocal(newui, rpath, wd=cwd) |
|
285 | path, newlui = dispatch._getlocal(newui, rpath, wd=cwd) | |
286 |
|
286 | |||
287 | extensions.populateui(newui) |
|
287 | extensions.populateui(newui) | |
288 | commandserver.setuplogging(newui, fp=cdebug) |
|
288 | commandserver.setuplogging(newui, fp=cdebug) | |
289 | if newui is not newlui: |
|
289 | if newui is not newlui: | |
290 | extensions.populateui(newlui) |
|
290 | extensions.populateui(newlui) | |
291 | commandserver.setuplogging(newlui, fp=cdebug) |
|
291 | commandserver.setuplogging(newlui, fp=cdebug) | |
292 |
|
292 | |||
293 | return (newui, newlui) |
|
293 | return (newui, newlui) | |
294 |
|
294 | |||
295 |
|
295 | |||
296 | class channeledsystem(object): |
|
296 | class channeledsystem(object): | |
297 | """Propagate ui.system() request in the following format: |
|
297 | """Propagate ui.system() request in the following format: | |
298 |
|
298 | |||
299 | payload length (unsigned int), |
|
299 | payload length (unsigned int), | |
300 | type, '\0', |
|
300 | type, '\0', | |
301 | cmd, '\0', |
|
301 | cmd, '\0', | |
302 | cwd, '\0', |
|
302 | cwd, '\0', | |
303 | envkey, '=', val, '\0', |
|
303 | envkey, '=', val, '\0', | |
304 | ... |
|
304 | ... | |
305 | envkey, '=', val |
|
305 | envkey, '=', val | |
306 |
|
306 | |||
307 | if type == 'system', waits for: |
|
307 | if type == 'system', waits for: | |
308 |
|
308 | |||
309 | exitcode length (unsigned int), |
|
309 | exitcode length (unsigned int), | |
310 | exitcode (int) |
|
310 | exitcode (int) | |
311 |
|
311 | |||
312 | if type == 'pager', repeatedly waits for a command name ending with '\n' |
|
312 | if type == 'pager', repeatedly waits for a command name ending with '\n' | |
313 | and executes the entry defined in cmdtable, or exits the loop if the command name |
|
313 | and executes the entry defined in cmdtable, or exits the loop if the command name | |
314 | is empty. |
|
314 | is empty. | |
315 | """ |
|
315 | """ | |
316 |
|
316 | |||
317 | def __init__(self, in_, out, channel): |
|
317 | def __init__(self, in_, out, channel): | |
318 | self.in_ = in_ |
|
318 | self.in_ = in_ | |
319 | self.out = out |
|
319 | self.out = out | |
320 | self.channel = channel |
|
320 | self.channel = channel | |
321 |
|
321 | |||
322 | def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None): |
|
322 | def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None): | |
323 | args = [type, cmd, os.path.abspath(cwd or b'.')] |
|
323 | args = [type, cmd, os.path.abspath(cwd or b'.')] | |
324 | args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ)) |
|
324 | args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ)) | |
325 | data = b'\0'.join(args) |
|
325 | data = b'\0'.join(args) | |
326 | self.out.write(struct.pack(b'>cI', self.channel, len(data))) |
|
326 | self.out.write(struct.pack(b'>cI', self.channel, len(data))) | |
327 | self.out.write(data) |
|
327 | self.out.write(data) | |
328 | self.out.flush() |
|
328 | self.out.flush() | |
329 |
|
329 | |||
330 | if type == b'system': |
|
330 | if type == b'system': | |
331 | length = self.in_.read(4) |
|
331 | length = self.in_.read(4) | |
332 | (length,) = struct.unpack(b'>I', length) |
|
332 | (length,) = struct.unpack(b'>I', length) | |
333 | if length != 4: |
|
333 | if length != 4: | |
334 | raise error.Abort(_(b'invalid response')) |
|
334 | raise error.Abort(_(b'invalid response')) | |
335 | (rc,) = struct.unpack(b'>i', self.in_.read(4)) |
|
335 | (rc,) = struct.unpack(b'>i', self.in_.read(4)) | |
336 | return rc |
|
336 | return rc | |
337 | elif type == b'pager': |
|
337 | elif type == b'pager': | |
338 | while True: |
|
338 | while True: | |
339 | cmd = self.in_.readline()[:-1] |
|
339 | cmd = self.in_.readline()[:-1] | |
340 | if not cmd: |
|
340 | if not cmd: | |
341 | break |
|
341 | break | |
342 | if cmdtable and cmd in cmdtable: |
|
342 | if cmdtable and cmd in cmdtable: | |
343 | cmdtable[cmd]() |
|
343 | cmdtable[cmd]() | |
344 | else: |
|
344 | else: | |
345 | raise error.Abort(_(b'unexpected command: %s') % cmd) |
|
345 | raise error.Abort(_(b'unexpected command: %s') % cmd) | |
346 | else: |
|
346 | else: | |
347 | raise error.ProgrammingError(b'invalid S channel type: %s' % type) |
|
347 | raise error.ProgrammingError(b'invalid S channel type: %s' % type) | |
348 |
|
348 | |||
349 |
|
349 | |||
350 | _iochannels = [ |
|
350 | _iochannels = [ | |
351 | # server.ch, ui.fp, mode |
|
351 | # server.ch, ui.fp, mode | |
352 | (b'cin', b'fin', 'rb'), |
|
352 | (b'cin', b'fin', 'rb'), | |
353 | (b'cout', b'fout', 'wb'), |
|
353 | (b'cout', b'fout', 'wb'), | |
354 | (b'cerr', b'ferr', 'wb'), |
|
354 | (b'cerr', b'ferr', 'wb'), | |
355 | ] |
|
355 | ] | |
356 |
|
356 | |||
357 |
|
357 | |||
358 | class chgcmdserver(commandserver.server): |
|
358 | class chgcmdserver(commandserver.server): | |
359 | def __init__( |
|
359 | def __init__( | |
360 | self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress |
|
360 | self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress | |
361 | ): |
|
361 | ): | |
362 | super(chgcmdserver, self).__init__( |
|
362 | super(chgcmdserver, self).__init__( | |
363 | _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio), |
|
363 | _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio), | |
364 | repo, |
|
364 | repo, | |
365 | fin, |
|
365 | fin, | |
366 | fout, |
|
366 | fout, | |
367 | prereposetups, |
|
367 | prereposetups, | |
368 | ) |
|
368 | ) | |
369 | self.clientsock = sock |
|
369 | self.clientsock = sock | |
370 | self._ioattached = False |
|
370 | self._ioattached = False | |
371 | self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio" |
|
371 | self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio" | |
372 | self.hashstate = hashstate |
|
372 | self.hashstate = hashstate | |
373 | self.baseaddress = baseaddress |
|
373 | self.baseaddress = baseaddress | |
374 | if hashstate is not None: |
|
374 | if hashstate is not None: | |
375 | self.capabilities = self.capabilities.copy() |
|
375 | self.capabilities = self.capabilities.copy() | |
376 | self.capabilities[b'validate'] = chgcmdserver.validate |
|
376 | self.capabilities[b'validate'] = chgcmdserver.validate | |
377 |
|
377 | |||
378 | def cleanup(self): |
|
378 | def cleanup(self): | |
379 | super(chgcmdserver, self).cleanup() |
|
379 | super(chgcmdserver, self).cleanup() | |
380 | # dispatch._runcatch() does not flush outputs if exception is not |
|
380 | # dispatch._runcatch() does not flush outputs if exception is not | |
381 | # handled by dispatch._dispatch() |
|
381 | # handled by dispatch._dispatch() | |
382 | self.ui.flush() |
|
382 | self.ui.flush() | |
383 | self._restoreio() |
|
383 | self._restoreio() | |
384 | self._ioattached = False |
|
384 | self._ioattached = False | |
385 |
|
385 | |||
386 | def attachio(self): |
|
386 | def attachio(self): | |
387 | """Attach to client's stdio passed via unix domain socket; all |
|
387 | """Attach to client's stdio passed via unix domain socket; all | |
388 | channels except cresult will no longer be used |
|
388 | channels except cresult will no longer be used | |
389 | """ |
|
389 | """ | |
390 | # tell client to sendmsg() with 1-byte payload, which makes it |
|
390 | # tell client to sendmsg() with 1-byte payload, which makes it | |
391 | # distinctive from "attachio\n" command consumed by client.read() |
|
391 | # distinctive from "attachio\n" command consumed by client.read() | |
392 | self.clientsock.sendall(struct.pack(b'>cI', b'I', 1)) |
|
392 | self.clientsock.sendall(struct.pack(b'>cI', b'I', 1)) | |
393 | clientfds = util.recvfds(self.clientsock.fileno()) |
|
393 | clientfds = util.recvfds(self.clientsock.fileno()) | |
394 | self.ui.log(b'chgserver', b'received fds: %r\n', clientfds) |
|
394 | self.ui.log(b'chgserver', b'received fds: %r\n', clientfds) | |
395 |
|
395 | |||
396 | ui = self.ui |
|
396 | ui = self.ui | |
397 | ui.flush() |
|
397 | ui.flush() | |
398 | self._saveio() |
|
398 | self._saveio() | |
399 | for fd, (cn, fn, mode) in zip(clientfds, _iochannels): |
|
399 | for fd, (cn, fn, mode) in zip(clientfds, _iochannels): | |
400 | assert fd > 0 |
|
400 | assert fd > 0 | |
401 | fp = getattr(ui, fn) |
|
401 | fp = getattr(ui, fn) | |
402 | os.dup2(fd, fp.fileno()) |
|
402 | os.dup2(fd, fp.fileno()) | |
403 | os.close(fd) |
|
403 | os.close(fd) | |
404 | if self._ioattached: |
|
404 | if self._ioattached: | |
405 | continue |
|
405 | continue | |
406 | # reset buffering mode when client is first attached. as we want |
|
406 | # reset buffering mode when client is first attached. as we want | |
407 | # to see output immediately on pager, the mode stays unchanged |
|
407 | # to see output immediately on pager, the mode stays unchanged | |
408 | # when client re-attached. ferr is unchanged because it should |
|
408 | # when client re-attached. ferr is unchanged because it should | |
409 | # be unbuffered no matter if it is a tty or not. |
|
409 | # be unbuffered no matter if it is a tty or not. | |
410 | if fn == b'ferr': |
|
410 | if fn == b'ferr': | |
411 | newfp = fp |
|
411 | newfp = fp | |
412 | else: |
|
412 | else: | |
413 | # make it line buffered explicitly because the default is |
|
413 | # make it line buffered explicitly because the default is | |
414 | # decided on first write(), where fout could be a pager. |
|
414 | # decided on first write(), where fout could be a pager. | |
415 | if fp.isatty(): |
|
415 | if fp.isatty(): | |
416 | bufsize = 1 # line buffered |
|
416 | bufsize = 1 # line buffered | |
417 | else: |
|
417 | else: | |
418 | bufsize = -1 # system default |
|
418 | bufsize = -1 # system default | |
419 | newfp = os.fdopen(fp.fileno(), mode, bufsize) |
|
419 | newfp = os.fdopen(fp.fileno(), mode, bufsize) | |
420 | setattr(ui, fn, newfp) |
|
420 | setattr(ui, fn, newfp) | |
421 | setattr(self, cn, newfp) |
|
421 | setattr(self, cn, newfp) | |
422 |
|
422 | |||
423 | self._ioattached = True |
|
423 | self._ioattached = True | |
424 | self.cresult.write(struct.pack(b'>i', len(clientfds))) |
|
424 | self.cresult.write(struct.pack(b'>i', len(clientfds))) | |
425 |
|
425 | |||
426 | def _saveio(self): |
|
426 | def _saveio(self): | |
427 | if self._oldios: |
|
427 | if self._oldios: | |
428 | return |
|
428 | return | |
429 | ui = self.ui |
|
429 | ui = self.ui | |
430 | for cn, fn, _mode in _iochannels: |
|
430 | for cn, fn, _mode in _iochannels: | |
431 | ch = getattr(self, cn) |
|
431 | ch = getattr(self, cn) | |
432 | fp = getattr(ui, fn) |
|
432 | fp = getattr(ui, fn) | |
433 | fd = os.dup(fp.fileno()) |
|
433 | fd = os.dup(fp.fileno()) | |
434 | self._oldios.append((ch, fp, fd)) |
|
434 | self._oldios.append((ch, fp, fd)) | |
435 |
|
435 | |||
436 | def _restoreio(self): |
|
436 | def _restoreio(self): | |
437 | ui = self.ui |
|
437 | ui = self.ui | |
438 | for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels): |
|
438 | for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels): | |
439 | newfp = getattr(ui, fn) |
|
439 | newfp = getattr(ui, fn) | |
440 | # close newfp while it's associated with client; otherwise it |
|
440 | # close newfp while it's associated with client; otherwise it | |
441 | # would be closed when newfp is deleted |
|
441 | # would be closed when newfp is deleted | |
442 | if newfp is not fp: |
|
442 | if newfp is not fp: | |
443 | newfp.close() |
|
443 | newfp.close() | |
444 | # restore original fd: fp is open again |
|
444 | # restore original fd: fp is open again | |
445 | os.dup2(fd, fp.fileno()) |
|
445 | os.dup2(fd, fp.fileno()) | |
446 | os.close(fd) |
|
446 | os.close(fd) | |
447 | setattr(self, cn, ch) |
|
447 | setattr(self, cn, ch) | |
448 | setattr(ui, fn, fp) |
|
448 | setattr(ui, fn, fp) | |
449 | del self._oldios[:] |
|
449 | del self._oldios[:] | |
450 |
|
450 | |||
451 | def validate(self): |
|
451 | def validate(self): | |
452 | """Reload the config and check if the server is up to date |
|
452 | """Reload the config and check if the server is up to date | |
453 |
|
453 | |||
454 | Read a list of '\0' separated arguments. |
|
454 | Read a list of '\0' separated arguments. | |
455 | Write a non-empty list of '\0' separated instruction strings or '\0' |
|
455 | Write a non-empty list of '\0' separated instruction strings or '\0' | |
456 | if the list is empty. |
|
456 | if the list is empty. | |
457 | An instruction string could be either: |
|
457 | An instruction string could be either: | |
458 | - "unlink $path", the client should unlink the path to stop the |
|
458 | - "unlink $path", the client should unlink the path to stop the | |
459 | outdated server. |
|
459 | outdated server. | |
460 | - "redirect $path", the client should attempt to connect to $path |
|
460 | - "redirect $path", the client should attempt to connect to $path | |
461 | first. If it does not work, start a new server. It implies |
|
461 | first. If it does not work, start a new server. It implies | |
462 | "reconnect". |
|
462 | "reconnect". | |
463 | - "exit $n", the client should exit directly with code n. |
|
463 | - "exit $n", the client should exit directly with code n. | |
464 | This may happen if we cannot parse the config. |
|
464 | This may happen if we cannot parse the config. | |
465 | - "reconnect", the client should close the connection and |
|
465 | - "reconnect", the client should close the connection and | |
466 | reconnect. |
|
466 | reconnect. | |
467 | If neither "reconnect" nor "redirect" is included in the instruction |
|
467 | If neither "reconnect" nor "redirect" is included in the instruction | |
468 | list, the client can continue with this server after completing all |
|
468 | list, the client can continue with this server after completing all | |
469 | the instructions. |
|
469 | the instructions. | |
470 | """ |
|
470 | """ | |
471 | from . import dispatch # avoid cycle |
|
471 | from . import dispatch # avoid cycle | |
472 |
|
472 | |||
473 | args = self._readlist() |
|
473 | args = self._readlist() | |
474 | try: |
|
474 | try: | |
475 | self.ui, lui = _loadnewui(self.ui, args, self.cdebug) |
|
475 | self.ui, lui = _loadnewui(self.ui, args, self.cdebug) | |
476 | except error.ParseError as inst: |
|
476 | except error.ParseError as inst: | |
477 | dispatch._formatparse(self.ui.warn, inst) |
|
477 | dispatch._formatparse(self.ui.warn, inst) | |
478 | self.ui.flush() |
|
478 | self.ui.flush() | |
479 | self.cresult.write(b'exit 255') |
|
479 | self.cresult.write(b'exit 255') | |
480 | return |
|
480 | return | |
481 | except error.Abort as inst: |
|
481 | except error.Abort as inst: | |
482 | self.ui.error(_(b"abort: %s\n") % inst) |
|
482 | self.ui.error(_(b"abort: %s\n") % inst) | |
483 | if inst.hint: |
|
483 | if inst.hint: | |
484 | self.ui.error(_(b"(%s)\n") % inst.hint) |
|
484 | self.ui.error(_(b"(%s)\n") % inst.hint) | |
485 | self.ui.flush() |
|
485 | self.ui.flush() | |
486 | self.cresult.write(b'exit 255') |
|
486 | self.cresult.write(b'exit 255') | |
487 | return |
|
487 | return | |
488 | newhash = hashstate.fromui(lui, self.hashstate.mtimepaths) |
|
488 | newhash = hashstate.fromui(lui, self.hashstate.mtimepaths) | |
489 | insts = [] |
|
489 | insts = [] | |
490 | if newhash.mtimehash != self.hashstate.mtimehash: |
|
490 | if newhash.mtimehash != self.hashstate.mtimehash: | |
491 | addr = _hashaddress(self.baseaddress, self.hashstate.confighash) |
|
491 | addr = _hashaddress(self.baseaddress, self.hashstate.confighash) | |
492 | insts.append(b'unlink %s' % addr) |
|
492 | insts.append(b'unlink %s' % addr) | |
493 | # mtimehash is empty if one or more extensions fail to load. |
|
493 | # mtimehash is empty if one or more extensions fail to load. | |
494 | # to be compatible with hg, still serve the client this time. |
|
494 | # to be compatible with hg, still serve the client this time. | |
495 | if self.hashstate.mtimehash: |
|
495 | if self.hashstate.mtimehash: | |
496 | insts.append(b'reconnect') |
|
496 | insts.append(b'reconnect') | |
497 | if newhash.confighash != self.hashstate.confighash: |
|
497 | if newhash.confighash != self.hashstate.confighash: | |
498 | addr = _hashaddress(self.baseaddress, newhash.confighash) |
|
498 | addr = _hashaddress(self.baseaddress, newhash.confighash) | |
499 | insts.append(b'redirect %s' % addr) |
|
499 | insts.append(b'redirect %s' % addr) | |
500 | self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts)) |
|
500 | self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts)) | |
501 | self.cresult.write(b'\0'.join(insts) or b'\0') |
|
501 | self.cresult.write(b'\0'.join(insts) or b'\0') | |
502 |
|
502 | |||
503 | def chdir(self): |
|
503 | def chdir(self): | |
504 | """Change current directory |
|
504 | """Change current directory | |
505 |
|
505 | |||
506 | Note that the behavior of the --cwd option is a bit different from this. |
|
506 | Note that the behavior of the --cwd option is a bit different from this. | |
507 | It does not affect the --config parameter. |
|
507 | It does not affect the --config parameter. | |
508 | """ |
|
508 | """ | |
509 | path = self._readstr() |
|
509 | path = self._readstr() | |
510 | if not path: |
|
510 | if not path: | |
511 | return |
|
511 | return | |
512 | self.ui.log(b'chgserver', b"chdir to '%s'\n", path) |
|
512 | self.ui.log(b'chgserver', b"chdir to '%s'\n", path) | |
513 | os.chdir(path) |
|
513 | os.chdir(path) | |
514 |
|
514 | |||
515 | def setumask(self): |
|
515 | def setumask(self): | |
516 | """Change umask (DEPRECATED)""" |
|
516 | """Change umask (DEPRECATED)""" | |
517 | # BUG: this does not follow the message frame structure, but kept for |
|
517 | # BUG: this does not follow the message frame structure, but kept for | |
518 | # backward compatibility with old chg clients for some time |
|
518 | # backward compatibility with old chg clients for some time | |
519 | self._setumask(self._read(4)) |
|
519 | self._setumask(self._read(4)) | |
520 |
|
520 | |||
521 | def setumask2(self): |
|
521 | def setumask2(self): | |
522 | """Change umask""" |
|
522 | """Change umask""" | |
523 | data = self._readstr() |
|
523 | data = self._readstr() | |
524 | if len(data) != 4: |
|
524 | if len(data) != 4: | |
525 | raise ValueError(b'invalid mask length in setumask2 request') |
|
525 | raise ValueError(b'invalid mask length in setumask2 request') | |
526 | self._setumask(data) |
|
526 | self._setumask(data) | |
527 |
|
527 | |||
528 | def _setumask(self, data): |
|
528 | def _setumask(self, data): | |
529 | mask = struct.unpack(b'>I', data)[0] |
|
529 | mask = struct.unpack(b'>I', data)[0] | |
530 | self.ui.log(b'chgserver', b'setumask %r\n', mask) |
|
530 | self.ui.log(b'chgserver', b'setumask %r\n', mask) | |
531 | util.setumask(mask) |
|
531 | util.setumask(mask) | |
532 |
|
532 | |||
533 | def runcommand(self): |
|
533 | def runcommand(self): | |
534 | # pager may be attached within the runcommand session, which should |
|
534 | # pager may be attached within the runcommand session, which should | |
535 | # be detached at the end of the session. otherwise the pager wouldn't |
|
535 | # be detached at the end of the session. otherwise the pager wouldn't | |
536 | # receive EOF. |
|
536 | # receive EOF. | |
537 | globaloldios = self._oldios |
|
537 | globaloldios = self._oldios | |
538 | self._oldios = [] |
|
538 | self._oldios = [] | |
539 | try: |
|
539 | try: | |
540 | return super(chgcmdserver, self).runcommand() |
|
540 | return super(chgcmdserver, self).runcommand() | |
541 | finally: |
|
541 | finally: | |
542 | self._restoreio() |
|
542 | self._restoreio() | |
543 | self._oldios = globaloldios |
|
543 | self._oldios = globaloldios | |
544 |
|
544 | |||
545 | def setenv(self): |
|
545 | def setenv(self): | |
546 | """Clear and update os.environ |
|
546 | """Clear and update os.environ | |
547 |
|
547 | |||
548 | Note that not all variables can take effect in the running process. |
|
548 | Note that not all variables can take effect in the running process. | |
549 | """ |
|
549 | """ | |
550 | l = self._readlist() |
|
550 | l = self._readlist() | |
551 | try: |
|
551 | try: | |
552 | newenv = dict(s.split(b'=', 1) for s in l) |
|
552 | newenv = dict(s.split(b'=', 1) for s in l) | |
553 | except ValueError: |
|
553 | except ValueError: | |
554 | raise ValueError(b'unexpected value in setenv request') |
|
554 | raise ValueError(b'unexpected value in setenv request') | |
555 | self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys())) |
|
555 | self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys())) | |
556 |
|
556 | |||
557 | encoding.environ.clear() |
|
557 | encoding.environ.clear() | |
558 | encoding.environ.update(newenv) |
|
558 | encoding.environ.update(newenv) | |
559 |
|
559 | |||
560 | capabilities = commandserver.server.capabilities.copy() |
|
560 | capabilities = commandserver.server.capabilities.copy() | |
561 | capabilities.update( |
|
561 | capabilities.update( | |
562 | { |
|
562 | { | |
563 | b'attachio': attachio, |
|
563 | b'attachio': attachio, | |
564 | b'chdir': chdir, |
|
564 | b'chdir': chdir, | |
565 | b'runcommand': runcommand, |
|
565 | b'runcommand': runcommand, | |
566 | b'setenv': setenv, |
|
566 | b'setenv': setenv, | |
567 | b'setumask': setumask, |
|
567 | b'setumask': setumask, | |
568 | b'setumask2': setumask2, |
|
568 | b'setumask2': setumask2, | |
569 | } |
|
569 | } | |
570 | ) |
|
570 | ) | |
571 |
|
571 | |||
572 | if util.safehasattr(procutil, b'setprocname'): |
|
572 | if util.safehasattr(procutil, b'setprocname'): | |
573 |
|
573 | |||
574 | def setprocname(self): |
|
574 | def setprocname(self): | |
575 | """Change process title""" |
|
575 | """Change process title""" | |
576 | name = self._readstr() |
|
576 | name = self._readstr() | |
577 | self.ui.log(b'chgserver', b'setprocname: %r\n', name) |
|
577 | self.ui.log(b'chgserver', b'setprocname: %r\n', name) | |
578 | procutil.setprocname(name) |
|
578 | procutil.setprocname(name) | |
579 |
|
579 | |||
580 | capabilities[b'setprocname'] = setprocname |
|
580 | capabilities[b'setprocname'] = setprocname | |
581 |
|
581 | |||
582 |
|
582 | |||
583 | def _tempaddress(address): |
|
583 | def _tempaddress(address): | |
584 | return b'%s.%d.tmp' % (address, os.getpid()) |
|
584 | return b'%s.%d.tmp' % (address, os.getpid()) | |
585 |
|
585 | |||
586 |
|
586 | |||
587 | def _hashaddress(address, hashstr): |
|
587 | def _hashaddress(address, hashstr): | |
588 | # if the basename of address contains '.', use only the left part. this |
|
588 | # if the basename of address contains '.', use only the left part. this | |
589 | # makes it possible for the client to pass 'server.tmp$PID', followed by |
|
589 | # makes it possible for the client to pass 'server.tmp$PID', followed by | |
590 | # an atomic rename to avoid locking when spawning new servers. |
|
590 | # an atomic rename to avoid locking when spawning new servers. | |
591 | dirname, basename = os.path.split(address) |
|
591 | dirname, basename = os.path.split(address) | |
592 | basename = basename.split(b'.', 1)[0] |
|
592 | basename = basename.split(b'.', 1)[0] | |
593 | return b'%s-%s' % (os.path.join(dirname, basename), hashstr) |
|
593 | return b'%s-%s' % (os.path.join(dirname, basename), hashstr) | |
594 |
|
594 | |||
595 |
|
595 | |||
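
The comment in _hashaddress above describes how the hashed socket address is derived: everything after the first '.' in the basename is dropped before the hash is appended, which lets a client bind a temporary name and atomically rename it into place. A minimal standalone sketch of that mapping (the function name and the sample paths below are illustrative only, not part of this changeset):

    import os.path

    def hashaddress_sketch(address, hashstr):
        # same derivation as _hashaddress above: keep only the part of the
        # basename left of the first '.', then append '-' and the hash
        dirname, basename = os.path.split(address)
        basename = basename.split(b'.', 1)[0]
        return b'%s-%s' % (os.path.join(dirname, basename), hashstr)

    # e.g. a temporary name passed by the client still maps to the same
    # hashed address:
    #   hashaddress_sketch(b'/tmp/chg.tmp1234', b'0123abcd') == b'/tmp/chg-0123abcd'
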
596 | class chgunixservicehandler(object): |
|
596 | class chgunixservicehandler(object): | |
597 | """Set of operations for chg services""" |
|
597 | """Set of operations for chg services""" | |
598 |
|
598 | |||
599 | pollinterval = 1 # [sec] |
|
599 | pollinterval = 1 # [sec] | |
600 |
|
600 | |||
601 | def __init__(self, ui): |
|
601 | def __init__(self, ui): | |
602 | self.ui = ui |
|
602 | self.ui = ui | |
603 | self._idletimeout = ui.configint(b'chgserver', b'idletimeout') |
|
603 | self._idletimeout = ui.configint(b'chgserver', b'idletimeout') | |
604 | self._lastactive = time.time() |
|
604 | self._lastactive = time.time() | |
605 |
|
605 | |||
606 | def bindsocket(self, sock, address): |
|
606 | def bindsocket(self, sock, address): | |
607 | self._inithashstate(address) |
|
607 | self._inithashstate(address) | |
608 | self._checkextensions() |
|
608 | self._checkextensions() | |
609 | self._bind(sock) |
|
609 | self._bind(sock) | |
610 | self._createsymlink() |
|
610 | self._createsymlink() | |
611 | # no "listening at" message should be printed to simulate hg behavior |
|
611 | # no "listening at" message should be printed to simulate hg behavior | |
612 |
|
612 | |||
613 | def _inithashstate(self, address): |
|
613 | def _inithashstate(self, address): | |
614 | self._baseaddress = address |
|
614 | self._baseaddress = address | |
615 | if self.ui.configbool(b'chgserver', b'skiphash'): |
|
615 | if self.ui.configbool(b'chgserver', b'skiphash'): | |
616 | self._hashstate = None |
|
616 | self._hashstate = None | |
617 | self._realaddress = address |
|
617 | self._realaddress = address | |
618 | return |
|
618 | return | |
619 | self._hashstate = hashstate.fromui(self.ui) |
|
619 | self._hashstate = hashstate.fromui(self.ui) | |
620 | self._realaddress = _hashaddress(address, self._hashstate.confighash) |
|
620 | self._realaddress = _hashaddress(address, self._hashstate.confighash) | |
621 |
|
621 | |||
622 | def _checkextensions(self): |
|
622 | def _checkextensions(self): | |
623 | if not self._hashstate: |
|
623 | if not self._hashstate: | |
624 | return |
|
624 | return | |
625 | if extensions.notloaded(): |
|
625 | if extensions.notloaded(): | |
626 | # one or more extensions failed to load. mtimehash becomes |
|
626 | # one or more extensions failed to load. mtimehash becomes | |
627 | # meaningless because we do not know the paths of those extensions. |
|
627 | # meaningless because we do not know the paths of those extensions. | |
628 | # set mtimehash to an illegal hash value to invalidate the server. |
|
628 | # set mtimehash to an illegal hash value to invalidate the server. | |
629 | self._hashstate.mtimehash = b'' |
|
629 | self._hashstate.mtimehash = b'' | |
630 |
|
630 | |||
631 | def _bind(self, sock): |
|
631 | def _bind(self, sock): | |
632 | # use a unique temp address so we can stat the file and do ownership |
|
632 | # use a unique temp address so we can stat the file and do ownership | |
633 | # check later |
|
633 | # check later | |
634 | tempaddress = _tempaddress(self._realaddress) |
|
634 | tempaddress = _tempaddress(self._realaddress) | |
635 | util.bindunixsocket(sock, tempaddress) |
|
635 | util.bindunixsocket(sock, tempaddress) | |
636 | self._socketstat = os.stat(tempaddress) |
|
636 | self._socketstat = os.stat(tempaddress) | |
637 | sock.listen(socket.SOMAXCONN) |
|
637 | sock.listen(socket.SOMAXCONN) | |
638 | # rename will atomically replace the old socket file if it exists. the |
|
638 | # rename will atomically replace the old socket file if it exists. the | |
639 | # old server will detect ownership change and exit. |
|
639 | # old server will detect ownership change and exit. | |
640 | util.rename(tempaddress, self._realaddress) |
|
640 | util.rename(tempaddress, self._realaddress) | |
641 |
|
641 | |||
642 | def _createsymlink(self): |
|
642 | def _createsymlink(self): | |
643 | if self._baseaddress == self._realaddress: |
|
643 | if self._baseaddress == self._realaddress: | |
644 | return |
|
644 | return | |
645 | tempaddress = _tempaddress(self._baseaddress) |
|
645 | tempaddress = _tempaddress(self._baseaddress) | |
646 | os.symlink(os.path.basename(self._realaddress), tempaddress) |
|
646 | os.symlink(os.path.basename(self._realaddress), tempaddress) | |
647 | util.rename(tempaddress, self._baseaddress) |
|
647 | util.rename(tempaddress, self._baseaddress) | |
648 |
|
648 | |||
649 | def _issocketowner(self): |
|
649 | def _issocketowner(self): | |
650 | try: |
|
650 | try: | |
651 | st = os.stat(self._realaddress) |
|
651 | st = os.stat(self._realaddress) | |
652 | return ( |
|
652 | return ( | |
653 | st.st_ino == self._socketstat.st_ino |
|
653 | st.st_ino == self._socketstat.st_ino | |
654 | and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME] |
|
654 | and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME] | |
655 | ) |
|
655 | ) | |
656 | except OSError: |
|
656 | except OSError: | |
657 | return False |
|
657 | return False | |
658 |
|
658 | |||
659 | def unlinksocket(self, address): |
|
659 | def unlinksocket(self, address): | |
660 | if not self._issocketowner(): |
|
660 | if not self._issocketowner(): | |
661 | return |
|
661 | return | |
662 | # it is possible to have a race condition here where we may |
|
662 | # it is possible to have a race condition here where we may | |
663 | # remove another server's socket file. but that's okay |
|
663 | # remove another server's socket file. but that's okay | |
664 | # since that server will detect and exit automatically and |
|
664 | # since that server will detect and exit automatically and | |
665 | # the client will start a new server on demand. |
|
665 | # the client will start a new server on demand. | |
666 | util.tryunlink(self._realaddress) |
|
666 | util.tryunlink(self._realaddress) | |
667 |
|
667 | |||
668 | def shouldexit(self): |
|
668 | def shouldexit(self): | |
669 | if not self._issocketowner(): |
|
669 | if not self._issocketowner(): | |
670 | self.ui.log( |
|
670 | self.ui.log( | |
671 | b'chgserver', b'%s is not owned, exiting.\n', self._realaddress |
|
671 | b'chgserver', b'%s is not owned, exiting.\n', self._realaddress | |
672 | ) |
|
672 | ) | |
673 | return True |
|
673 | return True | |
674 | if time.time() - self._lastactive > self._idletimeout: |
|
674 | if time.time() - self._lastactive > self._idletimeout: | |
675 | self.ui.log(b'chgserver', b'being idle too long. exiting.\n') |
|
675 | self.ui.log(b'chgserver', b'being idle too long. exiting.\n') | |
676 | return True |
|
676 | return True | |
677 | return False |
|
677 | return False | |
678 |
|
678 | |||
679 | def newconnection(self): |
|
679 | def newconnection(self): | |
680 | self._lastactive = time.time() |
|
680 | self._lastactive = time.time() | |
681 |
|
681 | |||
682 | def createcmdserver(self, repo, conn, fin, fout, prereposetups): |
|
682 | def createcmdserver(self, repo, conn, fin, fout, prereposetups): | |
683 | return chgcmdserver( |
|
683 | return chgcmdserver( | |
684 | self.ui, |
|
684 | self.ui, | |
685 | repo, |
|
685 | repo, | |
686 | fin, |
|
686 | fin, | |
687 | fout, |
|
687 | fout, | |
688 | conn, |
|
688 | conn, | |
689 | prereposetups, |
|
689 | prereposetups, | |
690 | self._hashstate, |
|
690 | self._hashstate, | |
691 | self._baseaddress, |
|
691 | self._baseaddress, | |
692 | ) |
|
692 | ) | |
693 |
|
693 | |||
694 |
|
694 | |||
695 | def chgunixservice(ui, repo, opts): |
|
695 | def chgunixservice(ui, repo, opts): | |
696 | # CHGINTERNALMARK is set by chg client. It is an indication that things are |
|
696 | # CHGINTERNALMARK is set by chg client. It is an indication that things are | |
697 | # started by chg so other code can do things accordingly, like disabling |
|
697 | # started by chg so other code can do things accordingly, like disabling | |
698 | # demandimport or detecting chg client started by chg client. When executed |
|
698 | # demandimport or detecting chg client started by chg client. When executed | |
699 | # here, CHGINTERNALMARK is no longer useful and hence dropped to make |
|
699 | # here, CHGINTERNALMARK is no longer useful and hence dropped to make | |
700 | # environ cleaner. |
|
700 | # environ cleaner. | |
701 | if b'CHGINTERNALMARK' in encoding.environ: |
|
701 | if b'CHGINTERNALMARK' in encoding.environ: | |
702 | del encoding.environ[b'CHGINTERNALMARK'] |
|
702 | del encoding.environ[b'CHGINTERNALMARK'] | |
703 | # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if |
|
703 | # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if | |
704 | # it thinks the current value is "C". This breaks the hash computation and |
|
704 | # it thinks the current value is "C". This breaks the hash computation and | |
705 | # causes chg to enter a restart loop. |
|
705 | # causes chg to enter a restart loop. | |
706 | if b'CHGORIG_LC_CTYPE' in encoding.environ: |
|
706 | if b'CHGORIG_LC_CTYPE' in encoding.environ: | |
707 | encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE'] |
|
707 | encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE'] | |
708 | del encoding.environ[b'CHGORIG_LC_CTYPE'] |
|
708 | del encoding.environ[b'CHGORIG_LC_CTYPE'] | |
709 | elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ: |
|
709 | elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ: | |
710 | if b'LC_CTYPE' in encoding.environ: |
|
710 | if b'LC_CTYPE' in encoding.environ: | |
711 | del encoding.environ[b'LC_CTYPE'] |
|
711 | del encoding.environ[b'LC_CTYPE'] | |
712 | del encoding.environ[b'CHG_CLEAR_LC_CTYPE'] |
|
712 | del encoding.environ[b'CHG_CLEAR_LC_CTYPE'] | |
713 |
|
713 | |||
714 | if repo: |
|
714 | if repo: | |
715 | # one chgserver can serve multiple repos. drop repo information |
|
715 | # one chgserver can serve multiple repos. drop repo information | |
716 | ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo') |
|
716 | ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo') | |
717 | h = chgunixservicehandler(ui) |
|
717 | h = chgunixservicehandler(ui) | |
718 | return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h) |
|
718 | return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h) |
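
For reference, the channeledsystem docstring earlier in this file defines the 'S' channel frame as a channel byte, a big-endian payload length, and a '\0'-separated list of type, cmd, cwd, and key=value environment entries; a 'system' request is then answered with a 4-byte length followed by a signed exit code. The sketch below shows how a client-side reader could decode such a frame and reply. It is only an illustration of the framing described in that docstring, not code from this changeset, and the stream names are hypothetical:

    import struct

    def read_system_request(stream):
        # assumes the leading 'S' channel byte has already been consumed;
        # 'stream' is a hypothetical binary file-like object
        (length,) = struct.unpack('>I', stream.read(4))
        fields = stream.read(length).split(b'\0')
        reqtype, cmd, cwd = fields[0], fields[1], fields[2]
        env = dict(item.split(b'=', 1) for item in fields[3:])
        return reqtype, cmd, cwd, env

    def reply_exitcode(stream, rc):
        # a 'system' request expects a 4-byte length followed by a signed int
        stream.write(struct.pack('>I', 4) + struct.pack('>i', rc))
        stream.flush()
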
@@ -1,779 +1,775 b'' | |||||
1 | # posix.py - Posix utility function implementations for Mercurial |
|
1 | # posix.py - Posix utility function implementations for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import errno |
|
10 | import errno | |
11 | import fcntl |
|
11 | import fcntl | |
12 | import getpass |
|
12 | import getpass | |
13 | import grp |
|
13 | import grp | |
14 | import os |
|
14 | import os | |
15 | import pwd |
|
15 | import pwd | |
16 | import re |
|
16 | import re | |
17 | import select |
|
17 | import select | |
18 | import stat |
|
18 | import stat | |
19 | import sys |
|
19 | import sys | |
20 | import tempfile |
|
20 | import tempfile | |
21 | import unicodedata |
|
21 | import unicodedata | |
22 |
|
22 | |||
23 | from .i18n import _ |
|
23 | from .i18n import _ | |
24 | from .pycompat import ( |
|
24 | from .pycompat import ( | |
25 | getattr, |
|
25 | getattr, | |
26 | open, |
|
26 | open, | |
27 | ) |
|
27 | ) | |
28 | from . import ( |
|
28 | from . import ( | |
29 | encoding, |
|
29 | encoding, | |
30 | error, |
|
30 | error, | |
31 | policy, |
|
31 | policy, | |
32 | pycompat, |
|
32 | pycompat, | |
33 | ) |
|
33 | ) | |
34 |
|
34 | |||
35 | osutil = policy.importmod('osutil') |
|
35 | osutil = policy.importmod('osutil') | |
36 |
|
36 | |||
37 | normpath = os.path.normpath |
|
37 | normpath = os.path.normpath | |
38 | samestat = os.path.samestat |
|
38 | samestat = os.path.samestat | |
39 | try: |
|
39 | try: | |
40 | oslink = os.link |
|
40 | oslink = os.link | |
41 | except AttributeError: |
|
41 | except AttributeError: | |
42 | # Some platforms build Python without os.link on systems that are |
|
42 | # Some platforms build Python without os.link on systems that are | |
43 | # vaguely unix-like but don't have hardlink support. For those |
|
43 | # vaguely unix-like but don't have hardlink support. For those | |
44 | # poor souls, just say we tried and that it failed so we fall back |
|
44 | # poor souls, just say we tried and that it failed so we fall back | |
45 | # to copies. |
|
45 | # to copies. | |
46 | def oslink(src, dst): |
|
46 | def oslink(src, dst): | |
47 | raise OSError( |
|
47 | raise OSError( | |
48 | errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst) |
|
48 | errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst) | |
49 | ) |
|
49 | ) | |
50 |
|
50 | |||
51 |
|
51 | |||
52 | readlink = os.readlink |
|
52 | readlink = os.readlink | |
53 | unlink = os.unlink |
|
53 | unlink = os.unlink | |
54 | rename = os.rename |
|
54 | rename = os.rename | |
55 | removedirs = os.removedirs |
|
55 | removedirs = os.removedirs | |
56 | expandglobs = False |
|
56 | expandglobs = False | |
57 |
|
57 | |||
58 | umask = os.umask(0) |
|
58 | umask = os.umask(0) | |
59 | os.umask(umask) |
|
59 | os.umask(umask) | |
60 |
|
60 | |||
61 | if not pycompat.ispy3: |
|
61 | if not pycompat.ispy3: | |
62 |
|
62 | |||
63 | def posixfile(name, mode='r', buffering=-1): |
|
63 | def posixfile(name, mode='r', buffering=-1): | |
64 | fp = open(name, mode=mode, buffering=buffering) |
|
64 | fp = open(name, mode=mode, buffering=buffering) | |
65 | # The position when opening in append mode is implementation defined, so |
|
65 | # The position when opening in append mode is implementation defined, so | |
66 | # make it consistent by always seeking to the end. |
|
66 | # make it consistent by always seeking to the end. | |
67 | if 'a' in mode: |
|
67 | if 'a' in mode: | |
68 | fp.seek(0, os.SEEK_END) |
|
68 | fp.seek(0, os.SEEK_END) | |
69 | return fp |
|
69 | return fp | |
70 |
|
70 | |||
71 |
|
71 | |||
72 | else: |
|
72 | else: | |
73 | # The underlying file object seeks as required in Python 3: |
|
73 | # The underlying file object seeks as required in Python 3: | |
74 | # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474 |
|
74 | # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474 | |
75 | posixfile = open |
|
75 | posixfile = open | |
76 |
|
76 | |||
77 |
|
77 | |||
78 | def split(p): |
|
78 | def split(p): | |
79 | '''Same as posixpath.split, but faster |
|
79 | '''Same as posixpath.split, but faster | |
80 |
|
80 | |||
81 | >>> import posixpath |
|
81 | >>> import posixpath | |
82 | >>> for f in [b'/absolute/path/to/file', |
|
82 | >>> for f in [b'/absolute/path/to/file', | |
83 | ... b'relative/path/to/file', |
|
83 | ... b'relative/path/to/file', | |
84 | ... b'file_alone', |
|
84 | ... b'file_alone', | |
85 | ... b'path/to/directory/', |
|
85 | ... b'path/to/directory/', | |
86 | ... b'/multiple/path//separators', |
|
86 | ... b'/multiple/path//separators', | |
87 | ... b'/file_at_root', |
|
87 | ... b'/file_at_root', | |
88 | ... b'///multiple_leading_separators_at_root', |
|
88 | ... b'///multiple_leading_separators_at_root', | |
89 | ... b'']: |
|
89 | ... b'']: | |
90 | ... assert split(f) == posixpath.split(f), f |
|
90 | ... assert split(f) == posixpath.split(f), f | |
91 | ''' |
|
91 | ''' | |
92 | ht = p.rsplit(b'/', 1) |
|
92 | ht = p.rsplit(b'/', 1) | |
93 | if len(ht) == 1: |
|
93 | if len(ht) == 1: | |
94 | return b'', p |
|
94 | return b'', p | |
95 | nh = ht[0].rstrip(b'/') |
|
95 | nh = ht[0].rstrip(b'/') | |
96 | if nh: |
|
96 | if nh: | |
97 | return nh, ht[1] |
|
97 | return nh, ht[1] | |
98 | return ht[0] + b'/', ht[1] |
|
98 | return ht[0] + b'/', ht[1] | |
99 |
|
99 | |||
100 |
|
100 | |||
101 | def openhardlinks(): |
|
101 | def openhardlinks(): | |
102 | '''return true if it is safe to hold open file handles to hardlinks''' |
|
102 | '''return true if it is safe to hold open file handles to hardlinks''' | |
103 | return True |
|
103 | return True | |
104 |
|
104 | |||
105 |
|
105 | |||
106 | def nlinks(name): |
|
106 | def nlinks(name): | |
107 | '''return number of hardlinks for the given file''' |
|
107 | '''return number of hardlinks for the given file''' | |
108 | return os.lstat(name).st_nlink |
|
108 | return os.lstat(name).st_nlink | |
109 |
|
109 | |||
110 |
|
110 | |||
111 | def parsepatchoutput(output_line): |
|
111 | def parsepatchoutput(output_line): | |
112 | """parses the output produced by patch and returns the filename""" |
|
112 | """parses the output produced by patch and returns the filename""" | |
113 | pf = output_line[14:] |
|
113 | pf = output_line[14:] | |
114 | if pycompat.sysplatform == b'OpenVMS': |
|
114 | if pycompat.sysplatform == b'OpenVMS': | |
115 | if pf[0] == b'`': |
|
115 | if pf[0] == b'`': | |
116 | pf = pf[1:-1] # Remove the quotes |
|
116 | pf = pf[1:-1] # Remove the quotes | |
117 | else: |
|
117 | else: | |
118 | if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf: |
|
118 | if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf: | |
119 | pf = pf[1:-1] # Remove the quotes |
|
119 | pf = pf[1:-1] # Remove the quotes | |
120 | return pf |
|
120 | return pf | |
121 |
|
121 | |||
122 |
|
122 | |||
123 | def sshargs(sshcmd, host, user, port): |
|
123 | def sshargs(sshcmd, host, user, port): | |
124 | '''Build argument list for ssh''' |
|
124 | '''Build argument list for ssh''' | |
125 | args = user and (b"%s@%s" % (user, host)) or host |
|
125 | args = user and (b"%s@%s" % (user, host)) or host | |
126 | if b'-' in args[:1]: |
|
126 | if b'-' in args[:1]: | |
127 | raise error.Abort( |
|
127 | raise error.Abort( | |
128 | _(b'illegal ssh hostname or username starting with -: %s') % args |
|
128 | _(b'illegal ssh hostname or username starting with -: %s') % args | |
129 | ) |
|
129 | ) | |
130 | args = shellquote(args) |
|
130 | args = shellquote(args) | |
131 | if port: |
|
131 | if port: | |
132 | args = b'-p %s %s' % (shellquote(port), args) |
|
132 | args = b'-p %s %s' % (shellquote(port), args) | |
133 | return args |
|
133 | return args | |
134 |
|
134 | |||
135 |
|
135 | |||
136 | def isexec(f): |
|
136 | def isexec(f): | |
137 | """check whether a file is executable""" |
|
137 | """check whether a file is executable""" | |
138 | return os.lstat(f).st_mode & 0o100 != 0 |
|
138 | return os.lstat(f).st_mode & 0o100 != 0 | |
139 |
|
139 | |||
140 |
|
140 | |||
141 | def setflags(f, l, x): |
|
141 | def setflags(f, l, x): | |
142 | st = os.lstat(f) |
|
142 | st = os.lstat(f) | |
143 | s = st.st_mode |
|
143 | s = st.st_mode | |
144 | if l: |
|
144 | if l: | |
145 | if not stat.S_ISLNK(s): |
|
145 | if not stat.S_ISLNK(s): | |
146 | # switch file to link |
|
146 | # switch file to link | |
147 | fp = open(f, b'rb') |
|
147 | fp = open(f, b'rb') | |
148 | data = fp.read() |
|
148 | data = fp.read() | |
149 | fp.close() |
|
149 | fp.close() | |
150 | unlink(f) |
|
150 | unlink(f) | |
151 | try: |
|
151 | try: | |
152 | os.symlink(data, f) |
|
152 | os.symlink(data, f) | |
153 | except OSError: |
|
153 | except OSError: | |
154 | # failed to make a link, rewrite file |
|
154 | # failed to make a link, rewrite file | |
155 | fp = open(f, b"wb") |
|
155 | fp = open(f, b"wb") | |
156 | fp.write(data) |
|
156 | fp.write(data) | |
157 | fp.close() |
|
157 | fp.close() | |
158 | # no chmod needed at this point |
|
158 | # no chmod needed at this point | |
159 | return |
|
159 | return | |
160 | if stat.S_ISLNK(s): |
|
160 | if stat.S_ISLNK(s): | |
161 | # switch link to file |
|
161 | # switch link to file | |
162 | data = os.readlink(f) |
|
162 | data = os.readlink(f) | |
163 | unlink(f) |
|
163 | unlink(f) | |
164 | fp = open(f, b"wb") |
|
164 | fp = open(f, b"wb") | |
165 | fp.write(data) |
|
165 | fp.write(data) | |
166 | fp.close() |
|
166 | fp.close() | |
167 | s = 0o666 & ~umask # avoid restatting for chmod |
|
167 | s = 0o666 & ~umask # avoid restatting for chmod | |
168 |
|
168 | |||
169 | sx = s & 0o100 |
|
169 | sx = s & 0o100 | |
170 | if st.st_nlink > 1 and bool(x) != bool(sx): |
|
170 | if st.st_nlink > 1 and bool(x) != bool(sx): | |
171 | # the file is a hardlink, break it |
|
171 | # the file is a hardlink, break it | |
172 | with open(f, b"rb") as fp: |
|
172 | with open(f, b"rb") as fp: | |
173 | data = fp.read() |
|
173 | data = fp.read() | |
174 | unlink(f) |
|
174 | unlink(f) | |
175 | with open(f, b"wb") as fp: |
|
175 | with open(f, b"wb") as fp: | |
176 | fp.write(data) |
|
176 | fp.write(data) | |
177 |
|
177 | |||
178 | if x and not sx: |
|
178 | if x and not sx: | |
179 | # Turn on +x for every +r bit when making a file executable |
|
179 | # Turn on +x for every +r bit when making a file executable | |
180 | # and obey umask. |
|
180 | # and obey umask. | |
181 | os.chmod(f, s | (s & 0o444) >> 2 & ~umask) |
|
181 | os.chmod(f, s | (s & 0o444) >> 2 & ~umask) | |
182 | elif not x and sx: |
|
182 | elif not x and sx: | |
183 | # Turn off all +x bits |
|
183 | # Turn off all +x bits | |
184 | os.chmod(f, s & 0o666) |
|
184 | os.chmod(f, s & 0o666) | |
185 |
|
185 | |||
186 |
|
186 | |||
187 | def copymode(src, dst, mode=None, enforcewritable=False): |
|
187 | def copymode(src, dst, mode=None, enforcewritable=False): | |
188 | '''Copy the file mode from the file at path src to dst. |
|
188 | '''Copy the file mode from the file at path src to dst. | |
189 | If src doesn't exist, we're using mode instead. If mode is None, we're |
|
189 | If src doesn't exist, we're using mode instead. If mode is None, we're | |
190 | using umask.''' |
|
190 | using umask.''' | |
191 | try: |
|
191 | try: | |
192 | st_mode = os.lstat(src).st_mode & 0o777 |
|
192 | st_mode = os.lstat(src).st_mode & 0o777 | |
193 | except OSError as inst: |
|
193 | except OSError as inst: | |
194 | if inst.errno != errno.ENOENT: |
|
194 | if inst.errno != errno.ENOENT: | |
195 | raise |
|
195 | raise | |
196 | st_mode = mode |
|
196 | st_mode = mode | |
197 | if st_mode is None: |
|
197 | if st_mode is None: | |
198 | st_mode = ~umask |
|
198 | st_mode = ~umask | |
199 | st_mode &= 0o666 |
|
199 | st_mode &= 0o666 | |
200 |
|
200 | |||
201 | new_mode = st_mode |
|
201 | new_mode = st_mode | |
202 |
|
202 | |||
203 | if enforcewritable: |
|
203 | if enforcewritable: | |
204 | new_mode |= stat.S_IWUSR |
|
204 | new_mode |= stat.S_IWUSR | |
205 |
|
205 | |||
206 | os.chmod(dst, new_mode) |
|
206 | os.chmod(dst, new_mode) | |
207 |
|
207 | |||
208 |
|
208 | |||
209 | def checkexec(path): |
|
209 | def checkexec(path): | |
210 | """ |
|
210 | """ | |
211 | Check whether the given path is on a filesystem with UNIX-like exec flags |
|
211 | Check whether the given path is on a filesystem with UNIX-like exec flags | |
212 |
|
212 | |||
213 | Requires a directory (like /foo/.hg) |
|
213 | Requires a directory (like /foo/.hg) | |
214 | """ |
|
214 | """ | |
215 |
|
215 | |||
216 | # VFAT on some Linux versions can flip mode but it doesn't persist across |
|
216 | # VFAT on some Linux versions can flip mode but it doesn't persist across | |
217 | # a FS remount. Frequently we can detect it if files are created |
|
217 | # a FS remount. Frequently we can detect it if files are created | |
218 | # with exec bit on. |
|
218 | # with exec bit on. | |
219 |
|
219 | |||
220 | try: |
|
220 | try: | |
221 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
|
221 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | |
222 | basedir = os.path.join(path, b'.hg') |
|
222 | basedir = os.path.join(path, b'.hg') | |
223 | cachedir = os.path.join(basedir, b'wcache') |
|
223 | cachedir = os.path.join(basedir, b'wcache') | |
224 | storedir = os.path.join(basedir, b'store') |
|
224 | storedir = os.path.join(basedir, b'store') | |
225 | if not os.path.exists(cachedir): |
|
225 | if not os.path.exists(cachedir): | |
226 | try: |
|
226 | try: | |
227 | # we want to create the 'cache' directory, not the '.hg' one. |
|
227 | # we want to create the 'cache' directory, not the '.hg' one. | |
228 | # Automatically creating '.hg' directory could silently spawn |
|
228 | # Automatically creating '.hg' directory could silently spawn | |
229 | # invalid Mercurial repositories. That seems like a bad idea. |
|
229 | # invalid Mercurial repositories. That seems like a bad idea. | |
230 | os.mkdir(cachedir) |
|
230 | os.mkdir(cachedir) | |
231 | if os.path.exists(storedir): |
|
231 | if os.path.exists(storedir): | |
232 | copymode(storedir, cachedir) |
|
232 | copymode(storedir, cachedir) | |
233 | else: |
|
233 | else: | |
234 | copymode(basedir, cachedir) |
|
234 | copymode(basedir, cachedir) | |
235 | except (IOError, OSError): |
|
235 | except (IOError, OSError): | |
236 | # if this fails, the fallback logic below triggers |
|
236 | # if this fails, the fallback logic below triggers | |
237 | pass |
|
237 | pass | |
238 | if os.path.isdir(cachedir): |
|
238 | if os.path.isdir(cachedir): | |
239 | checkisexec = os.path.join(cachedir, b'checkisexec') |
|
239 | checkisexec = os.path.join(cachedir, b'checkisexec') | |
240 | checknoexec = os.path.join(cachedir, b'checknoexec') |
|
240 | checknoexec = os.path.join(cachedir, b'checknoexec') | |
241 |
|
241 | |||
242 | try: |
|
242 | try: | |
243 | m = os.stat(checkisexec).st_mode |
|
243 | m = os.stat(checkisexec).st_mode | |
244 | except OSError as e: |
|
244 | except OSError as e: | |
245 | if e.errno != errno.ENOENT: |
|
245 | if e.errno != errno.ENOENT: | |
246 | raise |
|
246 | raise | |
247 | # checkisexec does not exist - fall through ... |
|
247 | # checkisexec does not exist - fall through ... | |
248 | else: |
|
248 | else: | |
249 | # checkisexec exists, check if it actually is exec |
|
249 | # checkisexec exists, check if it actually is exec | |
250 | if m & EXECFLAGS != 0: |
|
250 | if m & EXECFLAGS != 0: | |
251 | # ensure checkisexec exists, check it isn't exec |
|
251 | # ensure checkisexec exists, check it isn't exec | |
252 | try: |
|
252 | try: | |
253 | m = os.stat(checknoexec).st_mode |
|
253 | m = os.stat(checknoexec).st_mode | |
254 | except OSError as e: |
|
254 | except OSError as e: | |
255 | if e.errno != errno.ENOENT: |
|
255 | if e.errno != errno.ENOENT: | |
256 | raise |
|
256 | raise | |
257 | open(checknoexec, b'w').close() # might fail |
|
257 | open(checknoexec, b'w').close() # might fail | |
258 | m = os.stat(checknoexec).st_mode |
|
258 | m = os.stat(checknoexec).st_mode | |
259 | if m & EXECFLAGS == 0: |
|
259 | if m & EXECFLAGS == 0: | |
260 | # check-exec is exec and check-no-exec is not exec |
|
260 | # check-exec is exec and check-no-exec is not exec | |
261 | return True |
|
261 | return True | |
262 | # checknoexec exists but is exec - delete it |
|
262 | # checknoexec exists but is exec - delete it | |
263 | unlink(checknoexec) |
|
263 | unlink(checknoexec) | |
264 | # checkisexec exists but is not exec - delete it |
|
264 | # checkisexec exists but is not exec - delete it | |
265 | unlink(checkisexec) |
|
265 | unlink(checkisexec) | |
266 |
|
266 | |||
267 | # check using one file, leave it as checkisexec |
|
267 | # check using one file, leave it as checkisexec | |
268 | checkdir = cachedir |
|
268 | checkdir = cachedir | |
269 | else: |
|
269 | else: | |
270 | # check directly in path and don't leave checkisexec behind |
|
270 | # check directly in path and don't leave checkisexec behind | |
271 | checkdir = path |
|
271 | checkdir = path | |
272 | checkisexec = None |
|
272 | checkisexec = None | |
273 | fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-') |
|
273 | fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-') | |
274 | try: |
|
274 | try: | |
275 | os.close(fh) |
|
275 | os.close(fh) | |
276 | m = os.stat(fn).st_mode |
|
276 | m = os.stat(fn).st_mode | |
277 | if m & EXECFLAGS == 0: |
|
277 | if m & EXECFLAGS == 0: | |
278 | os.chmod(fn, m & 0o777 | EXECFLAGS) |
|
278 | os.chmod(fn, m & 0o777 | EXECFLAGS) | |
279 | if os.stat(fn).st_mode & EXECFLAGS != 0: |
|
279 | if os.stat(fn).st_mode & EXECFLAGS != 0: | |
280 | if checkisexec is not None: |
|
280 | if checkisexec is not None: | |
281 | os.rename(fn, checkisexec) |
|
281 | os.rename(fn, checkisexec) | |
282 | fn = None |
|
282 | fn = None | |
283 | return True |
|
283 | return True | |
284 | finally: |
|
284 | finally: | |
285 | if fn is not None: |
|
285 | if fn is not None: | |
286 | unlink(fn) |
|
286 | unlink(fn) | |
287 | except (IOError, OSError): |
|
287 | except (IOError, OSError): | |
288 | # we don't care, the user probably won't be able to commit anyway |
|
288 | # we don't care, the user probably won't be able to commit anyway | |
289 | return False |
|
289 | return False | |
290 |
|
290 | |||
291 |
|
291 | |||
292 | def checklink(path): |
|
292 | def checklink(path): | |
293 | """check whether the given path is on a symlink-capable filesystem""" |
|
293 | """check whether the given path is on a symlink-capable filesystem""" | |
294 | # mktemp is not racy because symlink creation will fail if the |
|
294 | # mktemp is not racy because symlink creation will fail if the | |
295 | # file already exists |
|
295 | # file already exists | |
296 | while True: |
|
296 | while True: | |
297 | cachedir = os.path.join(path, b'.hg', b'wcache') |
|
297 | cachedir = os.path.join(path, b'.hg', b'wcache') | |
298 | checklink = os.path.join(cachedir, b'checklink') |
|
298 | checklink = os.path.join(cachedir, b'checklink') | |
299 | # try fast path, read only |
|
299 | # try fast path, read only | |
300 | if os.path.islink(checklink): |
|
300 | if os.path.islink(checklink): | |
301 | return True |
|
301 | return True | |
302 | if os.path.isdir(cachedir): |
|
302 | if os.path.isdir(cachedir): | |
303 | checkdir = cachedir |
|
303 | checkdir = cachedir | |
304 | else: |
|
304 | else: | |
305 | checkdir = path |
|
305 | checkdir = path | |
306 | cachedir = None |
|
306 | cachedir = None | |
307 | name = tempfile.mktemp( |
|
307 | name = tempfile.mktemp( | |
308 | dir=pycompat.fsdecode(checkdir), prefix=r'checklink-' |
|
308 | dir=pycompat.fsdecode(checkdir), prefix=r'checklink-' | |
309 | ) |
|
309 | ) | |
310 | name = pycompat.fsencode(name) |
|
310 | name = pycompat.fsencode(name) | |
311 | try: |
|
311 | try: | |
312 | fd = None |
|
312 | fd = None | |
313 | if cachedir is None: |
|
313 | if cachedir is None: | |
314 | fd = pycompat.namedtempfile( |
|
314 | fd = pycompat.namedtempfile( | |
315 | dir=checkdir, prefix=b'hg-checklink-' |
|
315 | dir=checkdir, prefix=b'hg-checklink-' | |
316 | ) |
|
316 | ) | |
317 | target = os.path.basename(fd.name) |
|
317 | target = os.path.basename(fd.name) | |
318 | else: |
|
318 | else: | |
319 | # create a fixed file to link to; doesn't matter if it |
|
319 | # create a fixed file to link to; doesn't matter if it | |
320 | # already exists. |
|
320 | # already exists. | |
321 | target = b'checklink-target' |
|
321 | target = b'checklink-target' | |
322 | try: |
|
322 | try: | |
323 | fullpath = os.path.join(cachedir, target) |
|
323 | fullpath = os.path.join(cachedir, target) | |
324 | open(fullpath, b'w').close() |
|
324 | open(fullpath, b'w').close() | |
325 | except IOError as inst: |
|
325 | except IOError as inst: | |
326 | if ( |
|
326 | if ( | |
327 | inst[0] == errno.EACCES |
|
327 | inst[0] == errno.EACCES | |
328 | ): # pytype: disable=unsupported-operands |
|
328 | ): # pytype: disable=unsupported-operands | |
329 | # If we can't write to cachedir, just pretend |
|
329 | # If we can't write to cachedir, just pretend | |
330 | # that the fs is readonly and by association |
|
330 | # that the fs is readonly and by association | |
331 | # that the fs won't support symlinks. This |
|
331 | # that the fs won't support symlinks. This | |
332 | # seems like the least dangerous way to avoid |
|
332 | # seems like the least dangerous way to avoid | |
333 | # data loss. |
|
333 | # data loss. | |
334 | return False |
|
334 | return False | |
335 | raise |
|
335 | raise | |
336 | try: |
|
336 | try: | |
337 | os.symlink(target, name) |
|
337 | os.symlink(target, name) | |
338 | if cachedir is None: |
|
338 | if cachedir is None: | |
339 | unlink(name) |
|
339 | unlink(name) | |
340 | else: |
|
340 | else: | |
341 | try: |
|
341 | try: | |
342 | os.rename(name, checklink) |
|
342 | os.rename(name, checklink) | |
343 | except OSError: |
|
343 | except OSError: | |
344 | unlink(name) |
|
344 | unlink(name) | |
345 | return True |
|
345 | return True | |
346 | except OSError as inst: |
|
346 | except OSError as inst: | |
347 | # link creation might race, try again |
|
347 | # link creation might race, try again | |
348 | if inst.errno == errno.EEXIST: |
|
348 | if inst.errno == errno.EEXIST: | |
349 | continue |
|
349 | continue | |
350 | raise |
|
350 | raise | |
351 | finally: |
|
351 | finally: | |
352 | if fd is not None: |
|
352 | if fd is not None: | |
353 | fd.close() |
|
353 | fd.close() | |
354 | except AttributeError: |
|
354 | except AttributeError: | |
355 | return False |
|
355 | return False | |
356 | except OSError as inst: |
|
356 | except OSError as inst: | |
357 | # sshfs might report failure while successfully creating the link |
|
357 | # sshfs might report failure while successfully creating the link | |
358 | if inst.errno == errno.EIO and os.path.exists(name): |
|
358 | if inst.errno == errno.EIO and os.path.exists(name): | |
359 | unlink(name) |
|
359 | unlink(name) | |
360 | return False |
|
360 | return False | |
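A reduced sketch of the symlink probe above, leaving out the wcache bookkeeping and the retry-on-EEXIST loop (the function name and temp-file handling here are illustrative assumptions):

    import os
    import tempfile

    def probe_symlink_support(path):
        # symlink creation fails (or os.symlink is missing) on filesystems
        # and platforms that cannot represent symlinks
        target = tempfile.NamedTemporaryFile(dir=path, prefix='hg-checklink-')
        name = os.path.join(path, 'checklink-%d' % os.getpid())
        try:
            os.symlink(os.path.basename(target.name), name)
            os.unlink(name)
            return True
        except (OSError, AttributeError, NotImplementedError):
            return False
        finally:
            target.close()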
361 |
|
361 | |||
362 |
|
362 | |||
363 | def checkosfilename(path): |
|
363 | def checkosfilename(path): | |
364 | '''Check that the base-relative path is a valid filename on this platform. |
|
364 | '''Check that the base-relative path is a valid filename on this platform. | |
365 | Returns None if the path is ok, or a UI string describing the problem.''' |
|
365 | Returns None if the path is ok, or a UI string describing the problem.''' | |
366 | return None # on posix platforms, every path is ok |
|
366 | return None # on posix platforms, every path is ok | |
367 |
|
367 | |||
368 |
|
368 | |||
369 | def getfsmountpoint(dirpath): |
|
369 | def getfsmountpoint(dirpath): | |
370 | '''Get the filesystem mount point from a directory (best-effort) |
|
370 | '''Get the filesystem mount point from a directory (best-effort) | |
371 |
|
371 | |||
372 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
372 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | |
373 | ''' |
|
373 | ''' | |
374 | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) |
|
374 | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) | |
375 |
|
375 | |||
376 |
|
376 | |||
377 | def getfstype(dirpath): |
|
377 | def getfstype(dirpath): | |
378 | '''Get the filesystem type name from a directory (best-effort) |
|
378 | '''Get the filesystem type name from a directory (best-effort) | |
379 |
|
379 | |||
380 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
380 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | |
381 | ''' |
|
381 | ''' | |
382 | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) |
|
382 | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) | |
383 |
|
383 | |||
384 |
|
384 | |||
385 | def setbinary(fd): |
|
385 | def setbinary(fd): | |
386 | pass |
|
386 | pass | |
387 |
|
387 | |||
388 |
|
388 | |||
389 | def pconvert(path): |
|
389 | def pconvert(path): | |
390 | return path |
|
390 | return path | |
391 |
|
391 | |||
392 |
|
392 | |||
393 | def localpath(path): |
|
393 | def localpath(path): | |
394 | return path |
|
394 | return path | |
395 |
|
395 | |||
396 |
|
396 | |||
397 | def samefile(fpath1, fpath2): |
|
397 | def samefile(fpath1, fpath2): | |
398 | """Returns whether path1 and path2 refer to the same file. This is only |
|
398 | """Returns whether path1 and path2 refer to the same file. This is only | |
399 | guaranteed to work for files, not directories.""" |
|
399 | guaranteed to work for files, not directories.""" | |
400 | return os.path.samefile(fpath1, fpath2) |
|
400 | return os.path.samefile(fpath1, fpath2) | |
401 |
|
401 | |||
402 |
|
402 | |||
403 | def samedevice(fpath1, fpath2): |
|
403 | def samedevice(fpath1, fpath2): | |
404 | """Returns whether fpath1 and fpath2 are on the same device. This is only |
|
404 | """Returns whether fpath1 and fpath2 are on the same device. This is only | |
405 | guaranteed to work for files, not directories.""" |
|
405 | guaranteed to work for files, not directories.""" | |
406 | st1 = os.lstat(fpath1) |
|
406 | st1 = os.lstat(fpath1) | |
407 | st2 = os.lstat(fpath2) |
|
407 | st2 = os.lstat(fpath2) | |
408 | return st1.st_dev == st2.st_dev |
|
408 | return st1.st_dev == st2.st_dev | |
409 |
|
409 | |||
410 |
|
410 | |||
411 | # os.path.normcase is a no-op, which doesn't help us on non-native filesystems |
|
411 | # os.path.normcase is a no-op, which doesn't help us on non-native filesystems | |
412 | def normcase(path): |
|
412 | def normcase(path): | |
413 | return path.lower() |
|
413 | return path.lower() | |
414 |
|
414 | |||
415 |
|
415 | |||
416 | # what normcase does to ASCII strings |
|
416 | # what normcase does to ASCII strings | |
417 | normcasespec = encoding.normcasespecs.lower |
|
417 | normcasespec = encoding.normcasespecs.lower | |
418 | # fallback normcase function for non-ASCII strings |
|
418 | # fallback normcase function for non-ASCII strings | |
419 | normcasefallback = normcase |
|
419 | normcasefallback = normcase | |
420 |
|
420 | |||
421 | if pycompat.isdarwin: |
|
421 | if pycompat.isdarwin: | |
422 |
|
422 | |||
423 | def normcase(path): |
|
423 | def normcase(path): | |
424 | ''' |
|
424 | ''' | |
425 | Normalize a filename for OS X-compatible comparison: |
|
425 | Normalize a filename for OS X-compatible comparison: | |
426 | - escape-encode invalid characters |
|
426 | - escape-encode invalid characters | |
427 | - decompose to NFD |
|
427 | - decompose to NFD | |
428 | - lowercase |
|
428 | - lowercase | |
429 | - omit ignored characters [200c-200f, 202a-202e, 206a-206f, feff] |
|
429 | - omit ignored characters [200c-200f, 202a-202e, 206a-206f, feff] | |
430 |
|
430 | |||
431 | >>> normcase(b'UPPER') |
|
431 | >>> normcase(b'UPPER') | |
432 | 'upper' |
|
432 | 'upper' | |
433 | >>> normcase(b'Caf\\xc3\\xa9') |
|
433 | >>> normcase(b'Caf\\xc3\\xa9') | |
434 | 'cafe\\xcc\\x81' |
|
434 | 'cafe\\xcc\\x81' | |
435 | >>> normcase(b'\\xc3\\x89') |
|
435 | >>> normcase(b'\\xc3\\x89') | |
436 | 'e\\xcc\\x81' |
|
436 | 'e\\xcc\\x81' | |
437 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 |
|
437 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 | |
438 | '%b8%ca%c3\\xca\\xbe%c8.jpg' |
|
438 | '%b8%ca%c3\\xca\\xbe%c8.jpg' | |
439 | ''' |
|
439 | ''' | |
440 |
|
440 | |||
441 | try: |
|
441 | try: | |
442 | return encoding.asciilower(path) # exception for non-ASCII |
|
442 | return encoding.asciilower(path) # exception for non-ASCII | |
443 | except UnicodeDecodeError: |
|
443 | except UnicodeDecodeError: | |
444 | return normcasefallback(path) |
|
444 | return normcasefallback(path) | |
445 |
|
445 | |||
446 | normcasespec = encoding.normcasespecs.lower |
|
446 | normcasespec = encoding.normcasespecs.lower | |
447 |
|
447 | |||
448 | def normcasefallback(path): |
|
448 | def normcasefallback(path): | |
449 | try: |
|
449 | try: | |
450 | u = path.decode('utf-8') |
|
450 | u = path.decode('utf-8') | |
451 | except UnicodeDecodeError: |
|
451 | except UnicodeDecodeError: | |
452 | # OS X percent-encodes any bytes that aren't valid utf-8 |
|
452 | # OS X percent-encodes any bytes that aren't valid utf-8 | |
453 | s = b'' |
|
453 | s = b'' | |
454 | pos = 0 |
|
454 | pos = 0 | |
455 | l = len(path) |
|
455 | l = len(path) | |
456 | while pos < l: |
|
456 | while pos < l: | |
457 | try: |
|
457 | try: | |
458 | c = encoding.getutf8char(path, pos) |
|
458 | c = encoding.getutf8char(path, pos) | |
459 | pos += len(c) |
|
459 | pos += len(c) | |
460 | except ValueError: |
|
460 | except ValueError: | |
461 | c = b'%%%02X' % ord(path[pos : pos + 1]) |
|
461 | c = b'%%%02X' % ord(path[pos : pos + 1]) | |
462 | pos += 1 |
|
462 | pos += 1 | |
463 | s += c |
|
463 | s += c | |
464 |
|
464 | |||
465 | u = s.decode('utf-8') |
|
465 | u = s.decode('utf-8') | |
466 |
|
466 | |||
467 | # Decompose then lowercase (HFS+ technote specifies lower) |
|
467 | # Decompose then lowercase (HFS+ technote specifies lower) | |
468 | enc = unicodedata.normalize('NFD', u).lower().encode('utf-8') |
|
468 | enc = unicodedata.normalize('NFD', u).lower().encode('utf-8') | |
469 | # drop HFS+ ignored characters |
|
469 | # drop HFS+ ignored characters | |
470 | return encoding.hfsignoreclean(enc) |
|
470 | return encoding.hfsignoreclean(enc) | |
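The OS X comparison key built above can be reproduced with plain unicodedata once the input is valid UTF-8; this sketch skips the percent-escaping of undecodable bytes and the HFS+ ignored-character cleanup, both of which are Mercurial-specific:

    import unicodedata

    def mac_normcase(path):
        # decompose to NFD, then lowercase, as HFS+ does for comparisons
        return unicodedata.normalize('NFD', path).lower()

    # mac_normcase('Café') == 'cafe\u0301'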
471 |
|
471 | |||
472 |
|
472 | |||
473 | if pycompat.sysplatform == b'cygwin': |
|
473 | if pycompat.sysplatform == b'cygwin': | |
474 | # workaround for cygwin, in which mount point part of path is |
|
474 | # workaround for cygwin, in which mount point part of path is | |
475 | # treated as case sensitive, even though underlying NTFS is case |
|
475 | # treated as case sensitive, even though underlying NTFS is case | |
476 | # insensitive. |
|
476 | # insensitive. | |
477 |
|
477 | |||
478 | # default mount points |
|
478 | # default mount points | |
479 | cygwinmountpoints = sorted( |
|
479 | cygwinmountpoints = sorted( | |
480 | [b"/usr/bin", b"/usr/lib", b"/cygdrive",], reverse=True |
|
480 | [b"/usr/bin", b"/usr/lib", b"/cygdrive",], reverse=True | |
481 | ) |
|
481 | ) | |
482 |
|
482 | |||
483 | # use uppercasing for normcase, the same as the NTFS workaround |
|
483 | # use uppercasing for normcase, the same as the NTFS workaround | |
484 | def normcase(path): |
|
484 | def normcase(path): | |
485 | pathlen = len(path) |
|
485 | pathlen = len(path) | |
486 | if (pathlen == 0) or (path[0] != pycompat.ossep): |
|
486 | if (pathlen == 0) or (path[0] != pycompat.ossep): | |
487 | # treat as relative |
|
487 | # treat as relative | |
488 | return encoding.upper(path) |
|
488 | return encoding.upper(path) | |
489 |
|
489 | |||
490 | # to preserve case of mountpoint part |
|
490 | # to preserve case of mountpoint part | |
491 | for mp in cygwinmountpoints: |
|
491 | for mp in cygwinmountpoints: | |
492 | if not path.startswith(mp): |
|
492 | if not path.startswith(mp): | |
493 | continue |
|
493 | continue | |
494 |
|
494 | |||
495 | mplen = len(mp) |
|
495 | mplen = len(mp) | |
496 | if mplen == pathlen: # mount point itself |
|
496 | if mplen == pathlen: # mount point itself | |
497 | return mp |
|
497 | return mp | |
498 | if path[mplen] == pycompat.ossep: |
|
498 | if path[mplen] == pycompat.ossep: | |
499 | return mp + encoding.upper(path[mplen:]) |
|
499 | return mp + encoding.upper(path[mplen:]) | |
500 |
|
500 | |||
501 | return encoding.upper(path) |
|
501 | return encoding.upper(path) | |
502 |
|
502 | |||
503 | normcasespec = encoding.normcasespecs.other |
|
503 | normcasespec = encoding.normcasespecs.other | |
504 | normcasefallback = normcase |
|
504 | normcasefallback = normcase | |
505 |
|
505 | |||
506 | # Cygwin translates native ACLs to POSIX permissions, |
|
506 | # Cygwin translates native ACLs to POSIX permissions, | |
507 | # but these translations are not supported by native |
|
507 | # but these translations are not supported by native | |
508 | # tools, so the exec bit tends to be set erroneously. |
|
508 | # tools, so the exec bit tends to be set erroneously. | |
509 | # Therefore, disable executable bit access on Cygwin. |
|
509 | # Therefore, disable executable bit access on Cygwin. | |
510 | def checkexec(path): |
|
510 | def checkexec(path): | |
511 | return False |
|
511 | return False | |
512 |
|
512 | |||
513 | # Similarly, Cygwin's symlink emulation is likely to create |
|
513 | # Similarly, Cygwin's symlink emulation is likely to create | |
514 | # problems when Mercurial is used from both Cygwin and native |
|
514 | # problems when Mercurial is used from both Cygwin and native | |
515 | # Windows, with other native tools, or on shared volumes |
|
515 | # Windows, with other native tools, or on shared volumes | |
516 | def checklink(path): |
|
516 | def checklink(path): | |
517 | return False |
|
517 | return False | |
518 |
|
518 | |||
519 |
|
519 | |||
520 | _needsshellquote = None |
|
520 | _needsshellquote = None | |
521 |
|
521 | |||
522 |
|
522 | |||
523 | def shellquote(s): |
|
523 | def shellquote(s): | |
524 | if pycompat.sysplatform == b'OpenVMS': |
|
524 | if pycompat.sysplatform == b'OpenVMS': | |
525 | return b'"%s"' % s |
|
525 | return b'"%s"' % s | |
526 | global _needsshellquote |
|
526 | global _needsshellquote | |
527 | if _needsshellquote is None: |
|
527 | if _needsshellquote is None: | |
528 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search |
|
528 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search | |
529 | if s and not _needsshellquote(s): |
|
529 | if s and not _needsshellquote(s): | |
530 | # "s" shouldn't have to be quoted |
|
530 | # "s" shouldn't have to be quoted | |
531 | return s |
|
531 | return s | |
532 | else: |
|
532 | else: | |
533 | return b"'%s'" % s.replace(b"'", b"'\\''") |
|
533 | return b"'%s'" % s.replace(b"'", b"'\\''") | |
534 |
|
534 | |||
535 |
|
535 | |||
536 | def shellsplit(s): |
|
536 | def shellsplit(s): | |
537 | """Parse a command string in POSIX shell way (best-effort)""" |
|
537 | """Parse a command string in POSIX shell way (best-effort)""" | |
538 | return pycompat.shlexsplit(s, posix=True) |
|
538 | return pycompat.shlexsplit(s, posix=True) | |
539 |
|
539 | |||
540 |
|
540 | |||
541 | def quotecommand(cmd): |
|
|||
542 | return cmd |
|
|||
543 |
|
||||
544 |
|
||||
545 | def testpid(pid): |
|
541 | def testpid(pid): | |
546 | '''return False if pid dead, True if running or not sure''' |
|
542 | '''return False if pid dead, True if running or not sure''' | |
547 | if pycompat.sysplatform == b'OpenVMS': |
|
543 | if pycompat.sysplatform == b'OpenVMS': | |
548 | return True |
|
544 | return True | |
549 | try: |
|
545 | try: | |
550 | os.kill(pid, 0) |
|
546 | os.kill(pid, 0) | |
551 | return True |
|
547 | return True | |
552 | except OSError as inst: |
|
548 | except OSError as inst: | |
553 | return inst.errno != errno.ESRCH |
|
549 | return inst.errno != errno.ESRCH | |
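Signal 0 is the usual liveness probe; a minimal standalone version without the OpenVMS special case:

    import errno
    import os

    def pid_is_running(pid):
        # kill with signal 0 only performs existence and permission checks
        try:
            os.kill(pid, 0)
            return True
        except OSError as err:
            # EPERM means the process exists but is owned by someone else
            return err.errno != errno.ESRCH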
554 |
|
550 | |||
555 |
|
551 | |||
556 | def isowner(st): |
|
552 | def isowner(st): | |
557 | """Return True if the stat object st is from the current user.""" |
|
553 | """Return True if the stat object st is from the current user.""" | |
558 | return st.st_uid == os.getuid() |
|
554 | return st.st_uid == os.getuid() | |
559 |
|
555 | |||
560 |
|
556 | |||
561 | def findexe(command): |
|
557 | def findexe(command): | |
562 | '''Find executable for command searching like which does. |
|
558 | '''Find executable for command searching like which does. | |
563 | If command is a basename then PATH is searched for command. |
|
559 | If command is a basename then PATH is searched for command. | |
564 | PATH isn't searched if command is an absolute or relative path. |
|
560 | PATH isn't searched if command is an absolute or relative path. | |
565 | If command isn't found None is returned.''' |
|
561 | If command isn't found None is returned.''' | |
566 | if pycompat.sysplatform == b'OpenVMS': |
|
562 | if pycompat.sysplatform == b'OpenVMS': | |
567 | return command |
|
563 | return command | |
568 |
|
564 | |||
569 | def findexisting(executable): |
|
565 | def findexisting(executable): | |
570 | b'Will return executable if existing file' |
|
566 | b'Will return executable if existing file' | |
571 | if os.path.isfile(executable) and os.access(executable, os.X_OK): |
|
567 | if os.path.isfile(executable) and os.access(executable, os.X_OK): | |
572 | return executable |
|
568 | return executable | |
573 | return None |
|
569 | return None | |
574 |
|
570 | |||
575 | if pycompat.ossep in command: |
|
571 | if pycompat.ossep in command: | |
576 | return findexisting(command) |
|
572 | return findexisting(command) | |
577 |
|
573 | |||
578 | if pycompat.sysplatform == b'plan9': |
|
574 | if pycompat.sysplatform == b'plan9': | |
579 | return findexisting(os.path.join(b'/bin', command)) |
|
575 | return findexisting(os.path.join(b'/bin', command)) | |
580 |
|
576 | |||
581 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
577 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): | |
582 | executable = findexisting(os.path.join(path, command)) |
|
578 | executable = findexisting(os.path.join(path, command)) | |
583 | if executable is not None: |
|
579 | if executable is not None: | |
584 | return executable |
|
580 | return executable | |
585 | return None |
|
581 | return None | |
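A trimmed-down sketch of the PATH walk above, without the plan9 and OpenVMS branches and using str paths for brevity:

    import os

    def find_executable(command):
        # a path containing a separator bypasses the PATH search entirely
        if os.sep in command:
            return command if os.access(command, os.X_OK) else None
        for d in os.environ.get('PATH', '').split(os.pathsep):
            candidate = os.path.join(d, command)
            if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                return candidate
        return None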
586 |
|
582 | |||
587 |
|
583 | |||
588 | def setsignalhandler(): |
|
584 | def setsignalhandler(): | |
589 | pass |
|
585 | pass | |
590 |
|
586 | |||
591 |
|
587 | |||
592 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
588 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | |
593 |
|
589 | |||
594 |
|
590 | |||
595 | def statfiles(files): |
|
591 | def statfiles(files): | |
596 | '''Stat each file in files. Yield each stat, or None if a file does not |
|
592 | '''Stat each file in files. Yield each stat, or None if a file does not | |
597 | exist or has a type we don't care about.''' |
|
593 | exist or has a type we don't care about.''' | |
598 | lstat = os.lstat |
|
594 | lstat = os.lstat | |
599 | getkind = stat.S_IFMT |
|
595 | getkind = stat.S_IFMT | |
600 | for nf in files: |
|
596 | for nf in files: | |
601 | try: |
|
597 | try: | |
602 | st = lstat(nf) |
|
598 | st = lstat(nf) | |
603 | if getkind(st.st_mode) not in _wantedkinds: |
|
599 | if getkind(st.st_mode) not in _wantedkinds: | |
604 | st = None |
|
600 | st = None | |
605 | except OSError as err: |
|
601 | except OSError as err: | |
606 | if err.errno not in (errno.ENOENT, errno.ENOTDIR): |
|
602 | if err.errno not in (errno.ENOENT, errno.ENOTDIR): | |
607 | raise |
|
603 | raise | |
608 | st = None |
|
604 | st = None | |
609 | yield st |
|
605 | yield st | |
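Usage sketch for the lstat batching above (the file names are made up and str paths are used for brevity):

    import errno
    import os
    import stat

    _wanted = {stat.S_IFREG, stat.S_IFLNK}

    def stat_files(files):
        # yield a stat result per path, or None for missing or uninteresting entries
        for name in files:
            try:
                st = os.lstat(name)
                yield st if stat.S_IFMT(st.st_mode) in _wanted else None
            except OSError as err:
                if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                    raise
                yield None

    # list(stat_files(['README', 'no-such-file'])) -> [<stat result>, None]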
610 |
|
606 | |||
611 |
|
607 | |||
612 | def getuser(): |
|
608 | def getuser(): | |
613 | '''return name of current user''' |
|
609 | '''return name of current user''' | |
614 | return pycompat.fsencode(getpass.getuser()) |
|
610 | return pycompat.fsencode(getpass.getuser()) | |
615 |
|
611 | |||
616 |
|
612 | |||
617 | def username(uid=None): |
|
613 | def username(uid=None): | |
618 | """Return the name of the user with the given uid. |
|
614 | """Return the name of the user with the given uid. | |
619 |
|
615 | |||
620 | If uid is None, return the name of the current user.""" |
|
616 | If uid is None, return the name of the current user.""" | |
621 |
|
617 | |||
622 | if uid is None: |
|
618 | if uid is None: | |
623 | uid = os.getuid() |
|
619 | uid = os.getuid() | |
624 | try: |
|
620 | try: | |
625 | return pycompat.fsencode(pwd.getpwuid(uid)[0]) |
|
621 | return pycompat.fsencode(pwd.getpwuid(uid)[0]) | |
626 | except KeyError: |
|
622 | except KeyError: | |
627 | return b'%d' % uid |
|
623 | return b'%d' % uid | |
628 |
|
624 | |||
629 |
|
625 | |||
630 | def groupname(gid=None): |
|
626 | def groupname(gid=None): | |
631 | """Return the name of the group with the given gid. |
|
627 | """Return the name of the group with the given gid. | |
632 |
|
628 | |||
633 | If gid is None, return the name of the current group.""" |
|
629 | If gid is None, return the name of the current group.""" | |
634 |
|
630 | |||
635 | if gid is None: |
|
631 | if gid is None: | |
636 | gid = os.getgid() |
|
632 | gid = os.getgid() | |
637 | try: |
|
633 | try: | |
638 | return pycompat.fsencode(grp.getgrgid(gid)[0]) |
|
634 | return pycompat.fsencode(grp.getgrgid(gid)[0]) | |
639 | except KeyError: |
|
635 | except KeyError: | |
640 | return pycompat.bytestr(gid) |
|
636 | return pycompat.bytestr(gid) | |
641 |
|
637 | |||
642 |
|
638 | |||
643 | def groupmembers(name): |
|
639 | def groupmembers(name): | |
644 | """Return the list of members of the group with the given |
|
640 | """Return the list of members of the group with the given | |
645 | name, KeyError if the group does not exist. |
|
641 | name, KeyError if the group does not exist. | |
646 | """ |
|
642 | """ | |
647 | name = pycompat.fsdecode(name) |
|
643 | name = pycompat.fsdecode(name) | |
648 | return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem)) |
|
644 | return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem)) | |
649 |
|
645 | |||
650 |
|
646 | |||
651 | def spawndetached(args): |
|
647 | def spawndetached(args): | |
652 | return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args) |
|
648 | return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args) | |
653 |
|
649 | |||
654 |
|
650 | |||
655 | def gethgcmd(): |
|
651 | def gethgcmd(): | |
656 | return sys.argv[:1] |
|
652 | return sys.argv[:1] | |
657 |
|
653 | |||
658 |
|
654 | |||
659 | def makedir(path, notindexed): |
|
655 | def makedir(path, notindexed): | |
660 | os.mkdir(path) |
|
656 | os.mkdir(path) | |
661 |
|
657 | |||
662 |
|
658 | |||
663 | def lookupreg(key, name=None, scope=None): |
|
659 | def lookupreg(key, name=None, scope=None): | |
664 | return None |
|
660 | return None | |
665 |
|
661 | |||
666 |
|
662 | |||
667 | def hidewindow(): |
|
663 | def hidewindow(): | |
668 | """Hide current shell window. |
|
664 | """Hide current shell window. | |
669 |
|
665 | |||
670 | Used to hide the window opened when starting asynchronous |
|
666 | Used to hide the window opened when starting asynchronous | |
671 | child process under Windows, unneeded on other systems. |
|
667 | child process under Windows, unneeded on other systems. | |
672 | """ |
|
668 | """ | |
673 | pass |
|
669 | pass | |
674 |
|
670 | |||
675 |
|
671 | |||
676 | class cachestat(object): |
|
672 | class cachestat(object): | |
677 | def __init__(self, path): |
|
673 | def __init__(self, path): | |
678 | self.stat = os.stat(path) |
|
674 | self.stat = os.stat(path) | |
679 |
|
675 | |||
680 | def cacheable(self): |
|
676 | def cacheable(self): | |
681 | return bool(self.stat.st_ino) |
|
677 | return bool(self.stat.st_ino) | |
682 |
|
678 | |||
683 | __hash__ = object.__hash__ |
|
679 | __hash__ = object.__hash__ | |
684 |
|
680 | |||
685 | def __eq__(self, other): |
|
681 | def __eq__(self, other): | |
686 | try: |
|
682 | try: | |
687 | # Only dev, ino, size, mtime and atime are likely to change. Out |
|
683 | # Only dev, ino, size, mtime and atime are likely to change. Out | |
688 | # of these, we shouldn't compare atime but should compare the |
|
684 | # of these, we shouldn't compare atime but should compare the | |
689 | # rest. However, one of the other fields changing indicates |
|
685 | # rest. However, one of the other fields changing indicates | |
690 | # something fishy going on, so return False if anything but atime |
|
686 | # something fishy going on, so return False if anything but atime | |
691 | # changes. |
|
687 | # changes. | |
692 | return ( |
|
688 | return ( | |
693 | self.stat.st_mode == other.stat.st_mode |
|
689 | self.stat.st_mode == other.stat.st_mode | |
694 | and self.stat.st_ino == other.stat.st_ino |
|
690 | and self.stat.st_ino == other.stat.st_ino | |
695 | and self.stat.st_dev == other.stat.st_dev |
|
691 | and self.stat.st_dev == other.stat.st_dev | |
696 | and self.stat.st_nlink == other.stat.st_nlink |
|
692 | and self.stat.st_nlink == other.stat.st_nlink | |
697 | and self.stat.st_uid == other.stat.st_uid |
|
693 | and self.stat.st_uid == other.stat.st_uid | |
698 | and self.stat.st_gid == other.stat.st_gid |
|
694 | and self.stat.st_gid == other.stat.st_gid | |
699 | and self.stat.st_size == other.stat.st_size |
|
695 | and self.stat.st_size == other.stat.st_size | |
700 | and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] |
|
696 | and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] | |
701 | and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME] |
|
697 | and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME] | |
702 | ) |
|
698 | ) | |
703 | except AttributeError: |
|
699 | except AttributeError: | |
704 | return False |
|
700 | return False | |
705 |
|
701 | |||
706 | def __ne__(self, other): |
|
702 | def __ne__(self, other): | |
707 | return not self == other |
|
703 | return not self == other | |
708 |
|
704 | |||
709 |
|
705 | |||
710 | def statislink(st): |
|
706 | def statislink(st): | |
711 | '''check whether a stat result is a symlink''' |
|
707 | '''check whether a stat result is a symlink''' | |
712 | return st and stat.S_ISLNK(st.st_mode) |
|
708 | return st and stat.S_ISLNK(st.st_mode) | |
713 |
|
709 | |||
714 |
|
710 | |||
715 | def statisexec(st): |
|
711 | def statisexec(st): | |
716 | '''check whether a stat result is an executable file''' |
|
712 | '''check whether a stat result is an executable file''' | |
717 | return st and (st.st_mode & 0o100 != 0) |
|
713 | return st and (st.st_mode & 0o100 != 0) | |
718 |
|
714 | |||
719 |
|
715 | |||
720 | def poll(fds): |
|
716 | def poll(fds): | |
721 | """block until something happens on any file descriptor |
|
717 | """block until something happens on any file descriptor | |
722 |
|
718 | |||
723 | This is a generic helper that will check for any activity |
|
719 | This is a generic helper that will check for any activity | |
724 | (read, write, exception) and return the list of touched files. |
|
720 | (read, write, exception) and return the list of touched files. | |
725 |
|
721 | |||
726 | In unsupported cases, it will raise a NotImplementedError""" |
|
722 | In unsupported cases, it will raise a NotImplementedError""" | |
727 | try: |
|
723 | try: | |
728 | while True: |
|
724 | while True: | |
729 | try: |
|
725 | try: | |
730 | res = select.select(fds, fds, fds) |
|
726 | res = select.select(fds, fds, fds) | |
731 | break |
|
727 | break | |
732 | except select.error as inst: |
|
728 | except select.error as inst: | |
733 | if inst.args[0] == errno.EINTR: |
|
729 | if inst.args[0] == errno.EINTR: | |
734 | continue |
|
730 | continue | |
735 | raise |
|
731 | raise | |
736 | except ValueError: # out of range file descriptor |
|
732 | except ValueError: # out of range file descriptor | |
737 | raise NotImplementedError() |
|
733 | raise NotImplementedError() | |
738 | return sorted(list(set(sum(res, [])))) |
|
734 | return sorted(list(set(sum(res, [])))) | |
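A reduced equivalent of the select()-based helper above; the explicit EINTR retry mirrors the original, although Python 3.5+ already retries interrupted select calls internally (PEP 475):

    import errno
    import select

    def wait_any(fds):
        # block until at least one descriptor is readable, writable, or in error
        while True:
            try:
                r, w, x = select.select(fds, fds, fds)
                return sorted(set(r) | set(w) | set(x))
            except OSError as err:
                if err.errno == errno.EINTR:
                    continue
                raise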
739 |
|
735 | |||
740 |
|
736 | |||
741 | def readpipe(pipe): |
|
737 | def readpipe(pipe): | |
742 | """Read all available data from a pipe.""" |
|
738 | """Read all available data from a pipe.""" | |
743 | # We can't fstat() a pipe because Linux will always report 0. |
|
739 | # We can't fstat() a pipe because Linux will always report 0. | |
744 | # So, we set the pipe to non-blocking mode and read everything |
|
740 | # So, we set the pipe to non-blocking mode and read everything | |
745 | # that's available. |
|
741 | # that's available. | |
746 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) |
|
742 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) | |
747 | flags |= os.O_NONBLOCK |
|
743 | flags |= os.O_NONBLOCK | |
748 | oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags) |
|
744 | oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags) | |
749 |
|
745 | |||
750 | try: |
|
746 | try: | |
751 | chunks = [] |
|
747 | chunks = [] | |
752 | while True: |
|
748 | while True: | |
753 | try: |
|
749 | try: | |
754 | s = pipe.read() |
|
750 | s = pipe.read() | |
755 | if not s: |
|
751 | if not s: | |
756 | break |
|
752 | break | |
757 | chunks.append(s) |
|
753 | chunks.append(s) | |
758 | except IOError: |
|
754 | except IOError: | |
759 | break |
|
755 | break | |
760 |
|
756 | |||
761 | return b''.join(chunks) |
|
757 | return b''.join(chunks) | |
762 | finally: |
|
758 | finally: | |
763 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) |
|
759 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) | |
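Draining a pipe of unknown length relies on O_NONBLOCK, since fstat() on a pipe reports size 0; a minimal POSIX-only sketch assuming a binary-mode file object:

    import fcntl
    import os

    def drain_pipe(pipe):
        # switch to non-blocking mode, read until empty, then restore the flags
        flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
        fcntl.fcntl(pipe, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        chunks = []
        try:
            while True:
                try:
                    data = pipe.read()
                    if not data:
                        break
                    chunks.append(data)
                except (BlockingIOError, IOError):
                    break
        finally:
            fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
        return b''.join(chunks)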
764 |
|
760 | |||
765 |
|
761 | |||
766 | def bindunixsocket(sock, path): |
|
762 | def bindunixsocket(sock, path): | |
767 | """Bind the UNIX domain socket to the specified path""" |
|
763 | """Bind the UNIX domain socket to the specified path""" | |
768 | # use relative path instead of full path at bind() if possible, since |
|
764 | # use relative path instead of full path at bind() if possible, since | |
769 | # AF_UNIX path has very small length limit (107 chars) on common |
|
765 | # AF_UNIX path has very small length limit (107 chars) on common | |
770 | # platforms (see sys/un.h) |
|
766 | # platforms (see sys/un.h) | |
771 | dirname, basename = os.path.split(path) |
|
767 | dirname, basename = os.path.split(path) | |
772 | bakwdfd = None |
|
768 | bakwdfd = None | |
773 | if dirname: |
|
769 | if dirname: | |
774 | bakwdfd = os.open(b'.', os.O_DIRECTORY) |
|
770 | bakwdfd = os.open(b'.', os.O_DIRECTORY) | |
775 | os.chdir(dirname) |
|
771 | os.chdir(dirname) | |
776 | sock.bind(basename) |
|
772 | sock.bind(basename) | |
777 | if bakwdfd: |
|
773 | if bakwdfd: | |
778 | os.fchdir(bakwdfd) |
|
774 | os.fchdir(bakwdfd) | |
779 | os.close(bakwdfd) |
|
775 | os.close(bakwdfd) |
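The chdir dance above exists because an AF_UNIX sun_path is limited to roughly 107 bytes; a standalone POSIX-only sketch, with the socket creation added for completeness:

    import os
    import socket

    def bind_unix_socket(path):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        dirname, basename = os.path.split(path)
        oldcwd = None
        if dirname:
            # bind relative to the directory so the bound path stays short
            oldcwd = os.open('.', os.O_DIRECTORY)
            os.chdir(dirname)
        try:
            sock.bind(basename)
        finally:
            if oldcwd is not None:
                os.fchdir(oldcwd)
                os.close(oldcwd)
        return sock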
@@ -1,709 +1,708 b'' | |||||
1 | # sshpeer.py - ssh repository proxy class for mercurial |
|
1 | # sshpeer.py - ssh repository proxy class for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import re |
|
10 | import re | |
11 | import uuid |
|
11 | import uuid | |
12 |
|
12 | |||
13 | from .i18n import _ |
|
13 | from .i18n import _ | |
14 | from .pycompat import getattr |
|
14 | from .pycompat import getattr | |
15 | from . import ( |
|
15 | from . import ( | |
16 | error, |
|
16 | error, | |
17 | pycompat, |
|
17 | pycompat, | |
18 | util, |
|
18 | util, | |
19 | wireprotoserver, |
|
19 | wireprotoserver, | |
20 | wireprototypes, |
|
20 | wireprototypes, | |
21 | wireprotov1peer, |
|
21 | wireprotov1peer, | |
22 | wireprotov1server, |
|
22 | wireprotov1server, | |
23 | ) |
|
23 | ) | |
24 | from .utils import ( |
|
24 | from .utils import ( | |
25 | procutil, |
|
25 | procutil, | |
26 | stringutil, |
|
26 | stringutil, | |
27 | ) |
|
27 | ) | |
28 |
|
28 | |||
29 |
|
29 | |||
30 | def _serverquote(s): |
|
30 | def _serverquote(s): | |
31 | """quote a string for the remote shell ... which we assume is sh""" |
|
31 | """quote a string for the remote shell ... which we assume is sh""" | |
32 | if not s: |
|
32 | if not s: | |
33 | return s |
|
33 | return s | |
34 | if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s): |
|
34 | if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s): | |
35 | return s |
|
35 | return s | |
36 | return b"'%s'" % s.replace(b"'", b"'\\''") |
|
36 | return b"'%s'" % s.replace(b"'", b"'\\''") | |
37 |
|
37 | |||
38 |
|
38 | |||
39 | def _forwardoutput(ui, pipe, warn=False): |
|
39 | def _forwardoutput(ui, pipe, warn=False): | |
40 | """display all data currently available on pipe as remote output. |
|
40 | """display all data currently available on pipe as remote output. | |
41 |
|
41 | |||
42 | This is non blocking.""" |
|
42 | This is non blocking.""" | |
43 | if pipe: |
|
43 | if pipe: | |
44 | s = procutil.readpipe(pipe) |
|
44 | s = procutil.readpipe(pipe) | |
45 | if s: |
|
45 | if s: | |
46 | display = ui.warn if warn else ui.status |
|
46 | display = ui.warn if warn else ui.status | |
47 | for l in s.splitlines(): |
|
47 | for l in s.splitlines(): | |
48 | display(_(b"remote: "), l, b'\n') |
|
48 | display(_(b"remote: "), l, b'\n') | |
49 |
|
49 | |||
50 |
|
50 | |||
51 | class doublepipe(object): |
|
51 | class doublepipe(object): | |
52 | """Operate a side-channel pipe in addition of a main one |
|
52 | """Operate a side-channel pipe in addition of a main one | |
53 |
|
53 | |||
54 | The side-channel pipe contains server output to be forwarded to the user |
|
54 | The side-channel pipe contains server output to be forwarded to the user | |
55 | input. The double pipe will behave as the "main" pipe, but will ensure the |
|
55 | input. The double pipe will behave as the "main" pipe, but will ensure the | |
56 | content of the "side" pipe is properly processed while we wait for blocking |
|
56 | content of the "side" pipe is properly processed while we wait for blocking | |
57 | call on the "main" pipe. |
|
57 | call on the "main" pipe. | |
58 |
|
58 | |||
59 | If large amounts of data are read from "main", the forward will cease after |
|
59 | If large amounts of data are read from "main", the forward will cease after | |
60 | the first bytes start to appear. This simplifies the implementation |
|
60 | the first bytes start to appear. This simplifies the implementation | |
61 | without affecting actual output of sshpeer too much as we rarely issue |
|
61 | without affecting actual output of sshpeer too much as we rarely issue | |
62 | large read for data not yet emitted by the server. |
|
62 | large read for data not yet emitted by the server. | |
63 |
|
63 | |||
64 | The main pipe is expected to be a 'bufferedinputpipe' from the util module |
|
64 | The main pipe is expected to be a 'bufferedinputpipe' from the util module | |
65 | that handles all the os specific bits. This class lives in this module |
|
65 | that handles all the os specific bits. This class lives in this module | |
66 | because it focuses on behavior specific to the ssh protocol.""" |
|
66 | because it focuses on behavior specific to the ssh protocol.""" | |
67 |
|
67 | |||
68 | def __init__(self, ui, main, side): |
|
68 | def __init__(self, ui, main, side): | |
69 | self._ui = ui |
|
69 | self._ui = ui | |
70 | self._main = main |
|
70 | self._main = main | |
71 | self._side = side |
|
71 | self._side = side | |
72 |
|
72 | |||
73 | def _wait(self): |
|
73 | def _wait(self): | |
74 | """wait until some data are available on main or side |
|
74 | """wait until some data are available on main or side | |
75 |
|
75 | |||
76 | return a pair of boolean (ismainready, issideready) |
|
76 | return a pair of boolean (ismainready, issideready) | |
77 |
|
77 | |||
78 | (This will only wait for data if the setup is supported by `util.poll`) |
|
78 | (This will only wait for data if the setup is supported by `util.poll`) | |
79 | """ |
|
79 | """ | |
80 | if ( |
|
80 | if ( | |
81 | isinstance(self._main, util.bufferedinputpipe) |
|
81 | isinstance(self._main, util.bufferedinputpipe) | |
82 | and self._main.hasbuffer |
|
82 | and self._main.hasbuffer | |
83 | ): |
|
83 | ): | |
84 | # Main has data. Assume side is worth poking at. |
|
84 | # Main has data. Assume side is worth poking at. | |
85 | return True, True |
|
85 | return True, True | |
86 |
|
86 | |||
87 | fds = [self._main.fileno(), self._side.fileno()] |
|
87 | fds = [self._main.fileno(), self._side.fileno()] | |
88 | try: |
|
88 | try: | |
89 | act = util.poll(fds) |
|
89 | act = util.poll(fds) | |
90 | except NotImplementedError: |
|
90 | except NotImplementedError: | |
91 | # not yet supported case; assume all have data. |
|
91 | # not yet supported case; assume all have data. | |
92 | act = fds |
|
92 | act = fds | |
93 | return (self._main.fileno() in act, self._side.fileno() in act) |
|
93 | return (self._main.fileno() in act, self._side.fileno() in act) | |
94 |
|
94 | |||
95 | def write(self, data): |
|
95 | def write(self, data): | |
96 | return self._call(b'write', data) |
|
96 | return self._call(b'write', data) | |
97 |
|
97 | |||
98 | def read(self, size): |
|
98 | def read(self, size): | |
99 | r = self._call(b'read', size) |
|
99 | r = self._call(b'read', size) | |
100 | if size != 0 and not r: |
|
100 | if size != 0 and not r: | |
101 | # We've observed a condition that indicates the |
|
101 | # We've observed a condition that indicates the | |
102 | # stdout closed unexpectedly. Check stderr one |
|
102 | # stdout closed unexpectedly. Check stderr one | |
103 | # more time and snag anything that's there before |
|
103 | # more time and snag anything that's there before | |
104 | # letting anyone know the main part of the pipe |
|
104 | # letting anyone know the main part of the pipe | |
105 | # closed prematurely. |
|
105 | # closed prematurely. | |
106 | _forwardoutput(self._ui, self._side) |
|
106 | _forwardoutput(self._ui, self._side) | |
107 | return r |
|
107 | return r | |
108 |
|
108 | |||
109 | def unbufferedread(self, size): |
|
109 | def unbufferedread(self, size): | |
110 | r = self._call(b'unbufferedread', size) |
|
110 | r = self._call(b'unbufferedread', size) | |
111 | if size != 0 and not r: |
|
111 | if size != 0 and not r: | |
112 | # We've observed a condition that indicates the |
|
112 | # We've observed a condition that indicates the | |
113 | # stdout closed unexpectedly. Check stderr one |
|
113 | # stdout closed unexpectedly. Check stderr one | |
114 | # more time and snag anything that's there before |
|
114 | # more time and snag anything that's there before | |
115 | # letting anyone know the main part of the pipe |
|
115 | # letting anyone know the main part of the pipe | |
116 | # closed prematurely. |
|
116 | # closed prematurely. | |
117 | _forwardoutput(self._ui, self._side) |
|
117 | _forwardoutput(self._ui, self._side) | |
118 | return r |
|
118 | return r | |
119 |
|
119 | |||
120 | def readline(self): |
|
120 | def readline(self): | |
121 | return self._call(b'readline') |
|
121 | return self._call(b'readline') | |
122 |
|
122 | |||
123 | def _call(self, methname, data=None): |
|
123 | def _call(self, methname, data=None): | |
124 | """call <methname> on "main", forward output of "side" while blocking |
|
124 | """call <methname> on "main", forward output of "side" while blocking | |
125 | """ |
|
125 | """ | |
126 | # data can be '' or 0 |
|
126 | # data can be '' or 0 | |
127 | if (data is not None and not data) or self._main.closed: |
|
127 | if (data is not None and not data) or self._main.closed: | |
128 | _forwardoutput(self._ui, self._side) |
|
128 | _forwardoutput(self._ui, self._side) | |
129 | return b'' |
|
129 | return b'' | |
130 | while True: |
|
130 | while True: | |
131 | mainready, sideready = self._wait() |
|
131 | mainready, sideready = self._wait() | |
132 | if sideready: |
|
132 | if sideready: | |
133 | _forwardoutput(self._ui, self._side) |
|
133 | _forwardoutput(self._ui, self._side) | |
134 | if mainready: |
|
134 | if mainready: | |
135 | meth = getattr(self._main, methname) |
|
135 | meth = getattr(self._main, methname) | |
136 | if data is None: |
|
136 | if data is None: | |
137 | return meth() |
|
137 | return meth() | |
138 | else: |
|
138 | else: | |
139 | return meth(data) |
|
139 | return meth(data) | |
140 |
|
140 | |||
141 | def close(self): |
|
141 | def close(self): | |
142 | return self._main.close() |
|
142 | return self._main.close() | |
143 |
|
143 | |||
144 | def flush(self): |
|
144 | def flush(self): | |
145 | return self._main.flush() |
|
145 | return self._main.flush() | |
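The class above interleaves a data pipe with a stderr side channel; the heart of _wait/_call can be pictured with plain select, where the names below are illustrative rather than the real API:

    import os
    import select

    def read_with_side_channel(main, side, forward):
        # forward side-channel output while waiting for the main pipe,
        # then perform the (blocking) read on main
        watching = [main, side]
        while True:
            ready, _, _ = select.select(watching, [], [])
            if side in ready:
                chunk = os.read(side.fileno(), 4096)
                if chunk:
                    forward(chunk)
                else:
                    watching = [main]  # side channel reached EOF
            if main in ready:
                return main.readline()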
146 |
|
146 | |||
147 |
|
147 | |||
148 | def _cleanuppipes(ui, pipei, pipeo, pipee): |
|
148 | def _cleanuppipes(ui, pipei, pipeo, pipee): | |
149 | """Clean up pipes used by an SSH connection.""" |
|
149 | """Clean up pipes used by an SSH connection.""" | |
150 | if pipeo: |
|
150 | if pipeo: | |
151 | pipeo.close() |
|
151 | pipeo.close() | |
152 | if pipei: |
|
152 | if pipei: | |
153 | pipei.close() |
|
153 | pipei.close() | |
154 |
|
154 | |||
155 | if pipee: |
|
155 | if pipee: | |
156 | # Try to read from the err descriptor until EOF. |
|
156 | # Try to read from the err descriptor until EOF. | |
157 | try: |
|
157 | try: | |
158 | for l in pipee: |
|
158 | for l in pipee: | |
159 | ui.status(_(b'remote: '), l) |
|
159 | ui.status(_(b'remote: '), l) | |
160 | except (IOError, ValueError): |
|
160 | except (IOError, ValueError): | |
161 | pass |
|
161 | pass | |
162 |
|
162 | |||
163 | pipee.close() |
|
163 | pipee.close() | |
164 |
|
164 | |||
165 |
|
165 | |||
166 | def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None): |
|
166 | def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None): | |
167 | """Create an SSH connection to a server. |
|
167 | """Create an SSH connection to a server. | |
168 |
|
168 | |||
169 | Returns a tuple of (process, stdin, stdout, stderr) for the |
|
169 | Returns a tuple of (process, stdin, stdout, stderr) for the | |
170 | spawned process. |
|
170 | spawned process. | |
171 | """ |
|
171 | """ | |
172 | cmd = b'%s %s %s' % ( |
|
172 | cmd = b'%s %s %s' % ( | |
173 | sshcmd, |
|
173 | sshcmd, | |
174 | args, |
|
174 | args, | |
175 | procutil.shellquote( |
|
175 | procutil.shellquote( | |
176 | b'%s -R %s serve --stdio' |
|
176 | b'%s -R %s serve --stdio' | |
177 | % (_serverquote(remotecmd), _serverquote(path)) |
|
177 | % (_serverquote(remotecmd), _serverquote(path)) | |
178 | ), |
|
178 | ), | |
179 | ) |
|
179 | ) | |
180 |
|
180 | |||
181 | ui.debug(b'running %s\n' % cmd) |
|
181 | ui.debug(b'running %s\n' % cmd) | |
182 | cmd = procutil.quotecommand(cmd) |
|
|||
183 |
|
182 | |||
184 | # no buffering allows the use of 'select' |
|
183 | # no buffering allows the use of 'select' | |
185 | # feel free to remove buffering and select usage when we ultimately |
|
184 | # feel free to remove buffering and select usage when we ultimately | |
186 | # move to threading. |
|
185 | # move to threading. | |
187 | stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv) |
|
186 | stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv) | |
188 |
|
187 | |||
189 | return proc, stdin, stdout, stderr |
|
188 | return proc, stdin, stdout, stderr | |
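With the quoting helpers above, the string handed to popen4 ends up shaped like the sketch below; the hostname, port and path are made-up examples, and the conditional _serverquote escaping is omitted:

    def build_ssh_cmd(sshcmd, args, remotecmd, path):
        # the remote command is quoted once for the local shell; its pieces
        # would additionally be quoted for the remote shell when needed
        remote = '%s -R %s serve --stdio' % (remotecmd, path)
        return "%s %s '%s'" % (sshcmd, args, remote)

    # build_ssh_cmd('ssh', '-p 2222 user@example.com', 'hg', '/srv/repo')
    # -> "ssh -p 2222 user@example.com 'hg -R /srv/repo serve --stdio'"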
190 |
|
189 | |||
191 |
|
190 | |||
192 | def _clientcapabilities(): |
|
191 | def _clientcapabilities(): | |
193 | """Return list of capabilities of this client. |
|
192 | """Return list of capabilities of this client. | |
194 |
|
193 | |||
195 | Returns a list of capabilities that are supported by this client. |
|
194 | Returns a list of capabilities that are supported by this client. | |
196 | """ |
|
195 | """ | |
197 | protoparams = {b'partial-pull'} |
|
196 | protoparams = {b'partial-pull'} | |
198 | comps = [ |
|
197 | comps = [ | |
199 | e.wireprotosupport().name |
|
198 | e.wireprotosupport().name | |
200 | for e in util.compengines.supportedwireengines(util.CLIENTROLE) |
|
199 | for e in util.compengines.supportedwireengines(util.CLIENTROLE) | |
201 | ] |
|
200 | ] | |
202 | protoparams.add(b'comp=%s' % b','.join(comps)) |
|
201 | protoparams.add(b'comp=%s' % b','.join(comps)) | |
203 | return protoparams |
|
202 | return protoparams | |
204 |
|
203 | |||
205 |
|
204 | |||
206 | def _performhandshake(ui, stdin, stdout, stderr): |
|
205 | def _performhandshake(ui, stdin, stdout, stderr): | |
207 | def badresponse(): |
|
206 | def badresponse(): | |
208 | # Flush any output on stderr. In general, the stderr contains errors |
|
207 | # Flush any output on stderr. In general, the stderr contains errors | |
209 | # from the remote (ssh errors, some hg errors), and status indications |
|
208 | # from the remote (ssh errors, some hg errors), and status indications | |
210 | # (like "adding changes"), with no current way to tell them apart. |
|
209 | # (like "adding changes"), with no current way to tell them apart. | |
211 | # Here we failed so early that it's almost certainly only errors, so |
|
210 | # Here we failed so early that it's almost certainly only errors, so | |
212 | # use warn=True so -q doesn't hide them. |
|
211 | # use warn=True so -q doesn't hide them. | |
213 | _forwardoutput(ui, stderr, warn=True) |
|
212 | _forwardoutput(ui, stderr, warn=True) | |
214 |
|
213 | |||
215 | msg = _(b'no suitable response from remote hg') |
|
214 | msg = _(b'no suitable response from remote hg') | |
216 | hint = ui.config(b'ui', b'ssherrorhint') |
|
215 | hint = ui.config(b'ui', b'ssherrorhint') | |
217 | raise error.RepoError(msg, hint=hint) |
|
216 | raise error.RepoError(msg, hint=hint) | |
218 |
|
217 | |||
219 | # The handshake consists of sending wire protocol commands in reverse |
|
218 | # The handshake consists of sending wire protocol commands in reverse | |
220 | # order of protocol implementation and then sniffing for a response |
|
219 | # order of protocol implementation and then sniffing for a response | |
221 | # to one of them. |
|
220 | # to one of them. | |
222 | # |
|
221 | # | |
223 | # Those commands (from oldest to newest) are: |
|
222 | # Those commands (from oldest to newest) are: | |
224 | # |
|
223 | # | |
225 | # ``between`` |
|
224 | # ``between`` | |
226 | # Asks for the set of revisions between a pair of revisions. Command |
|
225 | # Asks for the set of revisions between a pair of revisions. Command | |
227 | # present in all Mercurial server implementations. |
|
226 | # present in all Mercurial server implementations. | |
228 | # |
|
227 | # | |
229 | # ``hello`` |
|
228 | # ``hello`` | |
230 | # Instructs the server to advertise its capabilities. Introduced in |
|
229 | # Instructs the server to advertise its capabilities. Introduced in | |
231 | # Mercurial 0.9.1. |
|
230 | # Mercurial 0.9.1. | |
232 | # |
|
231 | # | |
233 | # ``upgrade`` |
|
232 | # ``upgrade`` | |
234 | # Requests upgrade from default transport protocol version 1 to |
|
233 | # Requests upgrade from default transport protocol version 1 to | |
235 | # a newer version. Introduced in Mercurial 4.6 as an experimental |
|
234 | # a newer version. Introduced in Mercurial 4.6 as an experimental | |
236 | # feature. |
|
235 | # feature. | |
237 | # |
|
236 | # | |
238 | # The ``between`` command is issued with a request for the null |
|
237 | # The ``between`` command is issued with a request for the null | |
239 | # range. If the remote is a Mercurial server, this request will |
|
238 | # range. If the remote is a Mercurial server, this request will | |
240 | # generate a specific response: ``1\n\n``. This represents the |
|
239 | # generate a specific response: ``1\n\n``. This represents the | |
241 | # wire protocol encoded value for ``\n``. We look for ``1\n\n`` |
|
240 | # wire protocol encoded value for ``\n``. We look for ``1\n\n`` | |
242 | # in the output stream and know this is the response to ``between`` |
|
241 | # in the output stream and know this is the response to ``between`` | |
243 | # and we're at the end of our handshake reply. |
|
242 | # and we're at the end of our handshake reply. | |
244 | # |
|
243 | # | |
245 | # The response to the ``hello`` command will be a line with the |
|
244 | # The response to the ``hello`` command will be a line with the | |
246 | # length of the value returned by that command followed by that |
|
245 | # length of the value returned by that command followed by that | |
247 | # value. If the server doesn't support ``hello`` (which should be |
|
246 | # value. If the server doesn't support ``hello`` (which should be | |
248 | # rare), that line will be ``0\n``. Otherwise, the value will contain |
|
247 | # rare), that line will be ``0\n``. Otherwise, the value will contain | |
249 | # RFC 822 like lines. Of these, the ``capabilities:`` line contains |
|
248 | # RFC 822 like lines. Of these, the ``capabilities:`` line contains | |
250 | # the capabilities of the server. |
|
249 | # the capabilities of the server. | |
251 | # |
|
250 | # | |
252 | # The ``upgrade`` command isn't really a command in the traditional |
|
251 | # The ``upgrade`` command isn't really a command in the traditional | |
253 | # sense of version 1 of the transport because it isn't using the |
|
252 | # sense of version 1 of the transport because it isn't using the | |
254 | # proper mechanism for formatting: instead, it just encodes |
|
253 | # proper mechanism for formatting: instead, it just encodes | |
255 | # arguments on the line, delimited by spaces. |
|
254 | # arguments on the line, delimited by spaces. | |
256 | # |
|
255 | # | |
257 | # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``. |
|
256 | # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``. | |
258 | # If the server doesn't support protocol upgrades, it will reply to |
|
257 | # If the server doesn't support protocol upgrades, it will reply to | |
259 | # this line with ``0\n``. Otherwise, it emits an |
|
258 | # this line with ``0\n``. Otherwise, it emits an | |
260 | # ``upgraded <token> <protocol>`` line to both stdout and stderr. |
|
259 | # ``upgraded <token> <protocol>`` line to both stdout and stderr. | |
261 | # Content immediately following this line describes additional |
|
260 | # Content immediately following this line describes additional | |
262 | # protocol and server state. |
|
261 | # protocol and server state. | |
263 | # |
|
262 | # | |
264 | # In addition to the responses to our command requests, the server |
|
263 | # In addition to the responses to our command requests, the server | |
265 | # may emit "banner" output on stdout. SSH servers are allowed to |
|
264 | # may emit "banner" output on stdout. SSH servers are allowed to | |
266 | # print messages to stdout on login. Issuing commands on connection |
|
265 | # print messages to stdout on login. Issuing commands on connection | |
267 | # allows us to flush this banner output from the server by scanning |
|
266 | # allows us to flush this banner output from the server by scanning | |
268 | # for output to our well-known ``between`` command. Of course, if |
|
267 | # for output to our well-known ``between`` command. Of course, if | |
269 | # the banner contains ``1\n\n``, this will throw off our detection. |
|
268 | # the banner contains ``1\n\n``, this will throw off our detection. | |
270 |
|
269 | |||
271 | requestlog = ui.configbool(b'devel', b'debug.peer-request') |
|
270 | requestlog = ui.configbool(b'devel', b'debug.peer-request') | |
272 |
|
271 | |||
273 | # Generate a random token to help identify responses to version 2 |
|
272 | # Generate a random token to help identify responses to version 2 | |
274 | # upgrade request. |
|
273 | # upgrade request. | |
275 | token = pycompat.sysbytes(str(uuid.uuid4())) |
|
274 | token = pycompat.sysbytes(str(uuid.uuid4())) | |
276 | upgradecaps = [ |
|
275 | upgradecaps = [ | |
277 | (b'proto', wireprotoserver.SSHV2), |
|
276 | (b'proto', wireprotoserver.SSHV2), | |
278 | ] |
|
277 | ] | |
279 | upgradecaps = util.urlreq.urlencode(upgradecaps) |
|
278 | upgradecaps = util.urlreq.urlencode(upgradecaps) | |
280 |
|
279 | |||
281 | try: |
|
280 | try: | |
282 | pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40) |
|
281 | pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40) | |
283 | handshake = [ |
|
282 | handshake = [ | |
284 | b'hello\n', |
|
283 | b'hello\n', | |
285 | b'between\n', |
|
284 | b'between\n', | |
286 | b'pairs %d\n' % len(pairsarg), |
|
285 | b'pairs %d\n' % len(pairsarg), | |
287 | pairsarg, |
|
286 | pairsarg, | |
288 | ] |
|
287 | ] | |
289 |
|
288 | |||
290 | # Request upgrade to version 2 if configured. |
|
289 | # Request upgrade to version 2 if configured. | |
291 | if ui.configbool(b'experimental', b'sshpeer.advertise-v2'): |
|
290 | if ui.configbool(b'experimental', b'sshpeer.advertise-v2'): | |
292 | ui.debug(b'sending upgrade request: %s %s\n' % (token, upgradecaps)) |
|
291 | ui.debug(b'sending upgrade request: %s %s\n' % (token, upgradecaps)) | |
293 | handshake.insert(0, b'upgrade %s %s\n' % (token, upgradecaps)) |
|
292 | handshake.insert(0, b'upgrade %s %s\n' % (token, upgradecaps)) | |
294 |
|
293 | |||
295 | if requestlog: |
|
294 | if requestlog: | |
296 | ui.debug(b'devel-peer-request: hello+between\n') |
|
295 | ui.debug(b'devel-peer-request: hello+between\n') | |
297 | ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg)) |
|
296 | ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg)) | |
298 | ui.debug(b'sending hello command\n') |
|
297 | ui.debug(b'sending hello command\n') | |
299 | ui.debug(b'sending between command\n') |
|
298 | ui.debug(b'sending between command\n') | |
300 |
|
299 | |||
301 | stdin.write(b''.join(handshake)) |
|
300 | stdin.write(b''.join(handshake)) | |
302 | stdin.flush() |
|
301 | stdin.flush() | |
303 | except IOError: |
|
302 | except IOError: | |
304 | badresponse() |
|
303 | badresponse() | |
305 |
|
304 | |||
306 | # Assume version 1 of wire protocol by default. |
|
305 | # Assume version 1 of wire protocol by default. | |
307 | protoname = wireprototypes.SSHV1 |
|
306 | protoname = wireprototypes.SSHV1 | |
308 | reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token)) |
|
307 | reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token)) | |
309 |
|
308 | |||
310 | lines = [b'', b'dummy'] |
|
309 | lines = [b'', b'dummy'] | |
311 | max_noise = 500 |
|
310 | max_noise = 500 | |
312 | while lines[-1] and max_noise: |
|
311 | while lines[-1] and max_noise: | |
313 | try: |
|
312 | try: | |
314 | l = stdout.readline() |
|
313 | l = stdout.readline() | |
315 | _forwardoutput(ui, stderr, warn=True) |
|
314 | _forwardoutput(ui, stderr, warn=True) | |
316 |
|
315 | |||
317 | # Look for reply to protocol upgrade request. It has a token |
|
316 | # Look for reply to protocol upgrade request. It has a token | |
318 | # in it, so there should be no false positives. |
|
317 | # in it, so there should be no false positives. | |
319 | m = reupgraded.match(l) |
|
318 | m = reupgraded.match(l) | |
320 | if m: |
|
319 | if m: | |
321 | protoname = m.group(1) |
|
320 | protoname = m.group(1) | |
322 | ui.debug(b'protocol upgraded to %s\n' % protoname) |
|
321 | ui.debug(b'protocol upgraded to %s\n' % protoname) | |
323 | # If an upgrade was handled, the ``hello`` and ``between`` |
|
322 | # If an upgrade was handled, the ``hello`` and ``between`` | |
324 | # requests are ignored. The next output belongs to the |
|
323 | # requests are ignored. The next output belongs to the | |
325 | # protocol, so stop scanning lines. |
|
324 | # protocol, so stop scanning lines. | |
326 | break |
|
325 | break | |
327 |
|
326 | |||
328 | # Otherwise it could be a banner, or a ``0\n`` response if the server |
|
327 | # Otherwise it could be a banner, or a ``0\n`` response if the server | |
329 | # doesn't support upgrade. |
|
328 | # doesn't support upgrade. | |
330 |
|
329 | |||
331 | if lines[-1] == b'1\n' and l == b'\n': |
|
330 | if lines[-1] == b'1\n' and l == b'\n': | |
332 | break |
|
331 | break | |
333 | if l: |
|
332 | if l: | |
334 | ui.debug(b'remote: ', l) |
|
333 | ui.debug(b'remote: ', l) | |
335 | lines.append(l) |
|
334 | lines.append(l) | |
336 | max_noise -= 1 |
|
335 | max_noise -= 1 | |
337 | except IOError: |
|
336 | except IOError: | |
338 | badresponse() |
|
337 | badresponse() | |
339 | else: |
|
338 | else: | |
340 | badresponse() |
|
339 | badresponse() | |
341 |
|
340 | |||
342 | caps = set() |
|
341 | caps = set() | |
343 |
|
342 | |||
344 | # For version 1, we should see a ``capabilities`` line in response to the |
|
343 | # For version 1, we should see a ``capabilities`` line in response to the | |
345 | # ``hello`` command. |
|
344 | # ``hello`` command. | |
346 | if protoname == wireprototypes.SSHV1: |
|
345 | if protoname == wireprototypes.SSHV1: | |
347 | for l in reversed(lines): |
|
346 | for l in reversed(lines): | |
348 | # Look for response to ``hello`` command. Scan from the back so |
|
347 | # Look for response to ``hello`` command. Scan from the back so | |
349 | # we don't misinterpret banner output as the command reply. |
|
348 | # we don't misinterpret banner output as the command reply. | |
350 | if l.startswith(b'capabilities:'): |
|
349 | if l.startswith(b'capabilities:'): | |
351 | caps.update(l[:-1].split(b':')[1].split()) |
|
350 | caps.update(l[:-1].split(b':')[1].split()) | |
352 | break |
|
351 | break | |
353 | elif protoname == wireprotoserver.SSHV2: |
|
352 | elif protoname == wireprotoserver.SSHV2: | |
354 | # We see a line with number of bytes to follow and then a value |
|
353 | # We see a line with number of bytes to follow and then a value | |
355 | # looking like ``capabilities: *``. |
|
354 | # looking like ``capabilities: *``. | |
356 | line = stdout.readline() |
|
355 | line = stdout.readline() | |
357 | try: |
|
356 | try: | |
358 | valuelen = int(line) |
|
357 | valuelen = int(line) | |
359 | except ValueError: |
|
358 | except ValueError: | |
360 | badresponse() |
|
359 | badresponse() | |
361 |
|
360 | |||
362 | capsline = stdout.read(valuelen) |
|
361 | capsline = stdout.read(valuelen) | |
363 | if not capsline.startswith(b'capabilities: '): |
|
362 | if not capsline.startswith(b'capabilities: '): | |
364 | badresponse() |
|
363 | badresponse() | |
365 |
|
364 | |||
366 | ui.debug(b'remote: %s\n' % capsline) |
|
365 | ui.debug(b'remote: %s\n' % capsline) | |
367 |
|
366 | |||
368 | caps.update(capsline.split(b':')[1].split()) |
|
367 | caps.update(capsline.split(b':')[1].split()) | |
369 | # Trailing newline. |
|
368 | # Trailing newline. | |
370 | stdout.read(1) |
|
369 | stdout.read(1) | |
371 |
|
370 | |||
372 | # Error if we couldn't find capabilities, this means: |
|
371 | # Error if we couldn't find capabilities, this means: | |
373 | # |
|
372 | # | |
374 | # 1. Remote isn't a Mercurial server |
|
373 | # 1. Remote isn't a Mercurial server | |
375 | # 2. Remote is a <0.9.1 Mercurial server |
|
374 | # 2. Remote is a <0.9.1 Mercurial server | |
376 | # 3. Remote is a future Mercurial server that dropped ``hello`` |
|
375 | # 3. Remote is a future Mercurial server that dropped ``hello`` | |
377 | # and other attempted handshake mechanisms. |
|
376 | # and other attempted handshake mechanisms. | |
378 | if not caps: |
|
377 | if not caps: | |
379 | badresponse() |
|
378 | badresponse() | |
380 |
|
379 | |||
381 | # Flush any output on stderr before proceeding. |
|
380 | # Flush any output on stderr before proceeding. | |
382 | _forwardoutput(ui, stderr, warn=True) |
|
381 | _forwardoutput(ui, stderr, warn=True) | |
383 |
|
382 | |||
384 | return protoname, caps |
|
383 | return protoname, caps | |
385 |
|
384 | |||
386 |
|
385 | |||
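The version 1 handshake above has to tolerate arbitrary banner noise printed by remote login scripts before the real replies arrive, which is why the collected lines are scanned from the back for the ``capabilities:`` response. Below is a minimal standalone sketch of that scan in plain Python; the sample lines are made up and only stand in for what a remote shell session might emit.

    # Made-up handshake output: a login banner followed by the command replies
    # (the ``capabilities:`` line for ``hello``, then ``1\n`` + ``\n``).
    sample = [
        b'Welcome to build-host\n',
        b'capabilities: lookup branchmap pushkey known getbundle batch\n',
        b'1\n',
        b'\n',
    ]

    def parsecaps(lines):
        # Scan from the back so banner text cannot shadow the real reply.
        for line in reversed(lines):
            if line.startswith(b'capabilities:'):
                return set(line[:-1].split(b':')[1].split())
        return set()

    print(sorted(parsecaps(sample)))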
387 | class sshv1peer(wireprotov1peer.wirepeer): |
|
386 | class sshv1peer(wireprotov1peer.wirepeer): | |
388 | def __init__( |
|
387 | def __init__( | |
389 | self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True |
|
388 | self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True | |
390 | ): |
|
389 | ): | |
391 | """Create a peer from an existing SSH connection. |
|
390 | """Create a peer from an existing SSH connection. | |
392 |
|
391 | |||
393 | ``proc`` is a handle on the underlying SSH process. |
|
392 | ``proc`` is a handle on the underlying SSH process. | |
394 | ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio |
|
393 | ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio | |
395 | pipes for that process. |
|
394 | pipes for that process. | |
396 | ``caps`` is a set of capabilities supported by the remote. |
|
395 | ``caps`` is a set of capabilities supported by the remote. | |
397 | ``autoreadstderr`` denotes whether to automatically read from |
|
396 | ``autoreadstderr`` denotes whether to automatically read from | |
398 | stderr and to forward its output. |
|
397 | stderr and to forward its output. | |
399 | """ |
|
398 | """ | |
400 | self._url = url |
|
399 | self._url = url | |
401 | self.ui = ui |
|
400 | self.ui = ui | |
402 | # self._subprocess is unused. Keeping a handle on the process |
|
401 | # self._subprocess is unused. Keeping a handle on the process | |
403 | # holds a reference and prevents it from being garbage collected. |
|
402 | # holds a reference and prevents it from being garbage collected. | |
404 | self._subprocess = proc |
|
403 | self._subprocess = proc | |
405 |
|
404 | |||
406 | # And we hook up our "doublepipe" wrapper to allow querying |
|
405 | # And we hook up our "doublepipe" wrapper to allow querying | |
407 | # stderr any time we perform I/O. |
|
406 | # stderr any time we perform I/O. | |
408 | if autoreadstderr: |
|
407 | if autoreadstderr: | |
409 | stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr) |
|
408 | stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr) | |
410 | stdin = doublepipe(ui, stdin, stderr) |
|
409 | stdin = doublepipe(ui, stdin, stderr) | |
411 |
|
410 | |||
412 | self._pipeo = stdin |
|
411 | self._pipeo = stdin | |
413 | self._pipei = stdout |
|
412 | self._pipei = stdout | |
414 | self._pipee = stderr |
|
413 | self._pipee = stderr | |
415 | self._caps = caps |
|
414 | self._caps = caps | |
416 | self._autoreadstderr = autoreadstderr |
|
415 | self._autoreadstderr = autoreadstderr | |
417 |
|
416 | |||
418 | # Commands that have a "framed" response where the first line of the |
|
417 | # Commands that have a "framed" response where the first line of the | |
419 | # response contains the length of that response. |
|
418 | # response contains the length of that response. | |
420 | _FRAMED_COMMANDS = { |
|
419 | _FRAMED_COMMANDS = { | |
421 | b'batch', |
|
420 | b'batch', | |
422 | } |
|
421 | } | |
423 |
|
422 | |||
424 | # Begin of ipeerconnection interface. |
|
423 | # Begin of ipeerconnection interface. | |
425 |
|
424 | |||
426 | def url(self): |
|
425 | def url(self): | |
427 | return self._url |
|
426 | return self._url | |
428 |
|
427 | |||
429 | def local(self): |
|
428 | def local(self): | |
430 | return None |
|
429 | return None | |
431 |
|
430 | |||
432 | def peer(self): |
|
431 | def peer(self): | |
433 | return self |
|
432 | return self | |
434 |
|
433 | |||
435 | def canpush(self): |
|
434 | def canpush(self): | |
436 | return True |
|
435 | return True | |
437 |
|
436 | |||
438 | def close(self): |
|
437 | def close(self): | |
439 | pass |
|
438 | pass | |
440 |
|
439 | |||
441 | # End of ipeerconnection interface. |
|
440 | # End of ipeerconnection interface. | |
442 |
|
441 | |||
443 | # Begin of ipeercommands interface. |
|
442 | # Begin of ipeercommands interface. | |
444 |
|
443 | |||
445 | def capabilities(self): |
|
444 | def capabilities(self): | |
446 | return self._caps |
|
445 | return self._caps | |
447 |
|
446 | |||
448 | # End of ipeercommands interface. |
|
447 | # End of ipeercommands interface. | |
449 |
|
448 | |||
450 | def _readerr(self): |
|
449 | def _readerr(self): | |
451 | _forwardoutput(self.ui, self._pipee) |
|
450 | _forwardoutput(self.ui, self._pipee) | |
452 |
|
451 | |||
453 | def _abort(self, exception): |
|
452 | def _abort(self, exception): | |
454 | self._cleanup() |
|
453 | self._cleanup() | |
455 | raise exception |
|
454 | raise exception | |
456 |
|
455 | |||
457 | def _cleanup(self): |
|
456 | def _cleanup(self): | |
458 | _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee) |
|
457 | _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee) | |
459 |
|
458 | |||
460 | __del__ = _cleanup |
|
459 | __del__ = _cleanup | |
461 |
|
460 | |||
462 | def _sendrequest(self, cmd, args, framed=False): |
|
461 | def _sendrequest(self, cmd, args, framed=False): | |
463 | if self.ui.debugflag and self.ui.configbool( |
|
462 | if self.ui.debugflag and self.ui.configbool( | |
464 | b'devel', b'debug.peer-request' |
|
463 | b'devel', b'debug.peer-request' | |
465 | ): |
|
464 | ): | |
466 | dbg = self.ui.debug |
|
465 | dbg = self.ui.debug | |
467 | line = b'devel-peer-request: %s\n' |
|
466 | line = b'devel-peer-request: %s\n' | |
468 | dbg(line % cmd) |
|
467 | dbg(line % cmd) | |
469 | for key, value in sorted(args.items()): |
|
468 | for key, value in sorted(args.items()): | |
470 | if not isinstance(value, dict): |
|
469 | if not isinstance(value, dict): | |
471 | dbg(line % b' %s: %d bytes' % (key, len(value))) |
|
470 | dbg(line % b' %s: %d bytes' % (key, len(value))) | |
472 | else: |
|
471 | else: | |
473 | for dk, dv in sorted(value.items()): |
|
472 | for dk, dv in sorted(value.items()): | |
474 | dbg(line % b' %s-%s: %d' % (key, dk, len(dv))) |
|
473 | dbg(line % b' %s-%s: %d' % (key, dk, len(dv))) | |
475 | self.ui.debug(b"sending %s command\n" % cmd) |
|
474 | self.ui.debug(b"sending %s command\n" % cmd) | |
476 | self._pipeo.write(b"%s\n" % cmd) |
|
475 | self._pipeo.write(b"%s\n" % cmd) | |
477 | _func, names = wireprotov1server.commands[cmd] |
|
476 | _func, names = wireprotov1server.commands[cmd] | |
478 | keys = names.split() |
|
477 | keys = names.split() | |
479 | wireargs = {} |
|
478 | wireargs = {} | |
480 | for k in keys: |
|
479 | for k in keys: | |
481 | if k == b'*': |
|
480 | if k == b'*': | |
482 | wireargs[b'*'] = args |
|
481 | wireargs[b'*'] = args | |
483 | break |
|
482 | break | |
484 | else: |
|
483 | else: | |
485 | wireargs[k] = args[k] |
|
484 | wireargs[k] = args[k] | |
486 | del args[k] |
|
485 | del args[k] | |
487 | for k, v in sorted(pycompat.iteritems(wireargs)): |
|
486 | for k, v in sorted(pycompat.iteritems(wireargs)): | |
488 | self._pipeo.write(b"%s %d\n" % (k, len(v))) |
|
487 | self._pipeo.write(b"%s %d\n" % (k, len(v))) | |
489 | if isinstance(v, dict): |
|
488 | if isinstance(v, dict): | |
490 | for dk, dv in pycompat.iteritems(v): |
|
489 | for dk, dv in pycompat.iteritems(v): | |
491 | self._pipeo.write(b"%s %d\n" % (dk, len(dv))) |
|
490 | self._pipeo.write(b"%s %d\n" % (dk, len(dv))) | |
492 | self._pipeo.write(dv) |
|
491 | self._pipeo.write(dv) | |
493 | else: |
|
492 | else: | |
494 | self._pipeo.write(v) |
|
493 | self._pipeo.write(v) | |
495 | self._pipeo.flush() |
|
494 | self._pipeo.flush() | |
496 |
|
495 | |||
497 | # We know exactly how many bytes are in the response. So return a proxy |
|
496 | # We know exactly how many bytes are in the response. So return a proxy | |
498 | # around the raw output stream that allows reading exactly this many |
|
497 | # around the raw output stream that allows reading exactly this many | |
499 | # bytes. Callers then can read() without fear of overrunning the |
|
498 | # bytes. Callers then can read() without fear of overrunning the | |
500 | # response. |
|
499 | # response. | |
501 | if framed: |
|
500 | if framed: | |
502 | amount = self._getamount() |
|
501 | amount = self._getamount() | |
503 | return util.cappedreader(self._pipei, amount) |
|
502 | return util.cappedreader(self._pipei, amount) | |
504 |
|
503 | |||
505 | return self._pipei |
|
504 | return self._pipei | |
506 |
|
505 | |||
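``_sendrequest`` above writes the command name on its own line and then, for every argument, a ``<key> <length>`` header line followed by the raw value bytes (dictionary-valued arguments instead get an entry count and nested key/length/value pairs). A small sketch of the flat byte-string case, mirroring what ``_sendrequest`` writes to the pipe; the ``lookup`` call is used purely as an illustration:

    def encoderequest(cmd, args):
        # Command name, then "<key> <length>\n" plus the raw value for each
        # argument, in sorted key order.
        out = [b'%s\n' % cmd]
        for k, v in sorted(args.items()):
            out.append(b'%s %d\n' % (k, len(v)))
            out.append(v)
        return b''.join(out)

    print(encoderequest(b'lookup', {b'key': b'tip'}))  # b'lookup\nkey 3\ntip'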
507 | def _callstream(self, cmd, **args): |
|
506 | def _callstream(self, cmd, **args): | |
508 | args = pycompat.byteskwargs(args) |
|
507 | args = pycompat.byteskwargs(args) | |
509 | return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS) |
|
508 | return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS) | |
510 |
|
509 | |||
511 | def _callcompressable(self, cmd, **args): |
|
510 | def _callcompressable(self, cmd, **args): | |
512 | args = pycompat.byteskwargs(args) |
|
511 | args = pycompat.byteskwargs(args) | |
513 | return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS) |
|
512 | return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS) | |
514 |
|
513 | |||
515 | def _call(self, cmd, **args): |
|
514 | def _call(self, cmd, **args): | |
516 | args = pycompat.byteskwargs(args) |
|
515 | args = pycompat.byteskwargs(args) | |
517 | return self._sendrequest(cmd, args, framed=True).read() |
|
516 | return self._sendrequest(cmd, args, framed=True).read() | |
518 |
|
517 | |||
519 | def _callpush(self, cmd, fp, **args): |
|
518 | def _callpush(self, cmd, fp, **args): | |
520 | # The server responds with an empty frame if the client should |
|
519 | # The server responds with an empty frame if the client should | |
521 | # continue submitting the payload. |
|
520 | # continue submitting the payload. | |
522 | r = self._call(cmd, **args) |
|
521 | r = self._call(cmd, **args) | |
523 | if r: |
|
522 | if r: | |
524 | return b'', r |
|
523 | return b'', r | |
525 |
|
524 | |||
526 | # The payload consists of frames with content followed by an empty |
|
525 | # The payload consists of frames with content followed by an empty | |
527 | # frame. |
|
526 | # frame. | |
528 | for d in iter(lambda: fp.read(4096), b''): |
|
527 | for d in iter(lambda: fp.read(4096), b''): | |
529 | self._writeframed(d) |
|
528 | self._writeframed(d) | |
530 | self._writeframed(b"", flush=True) |
|
529 | self._writeframed(b"", flush=True) | |
531 |
|
530 | |||
532 | # In case of success, there is an empty frame and a frame containing |
|
531 | # In case of success, there is an empty frame and a frame containing | |
533 | # the integer result (as a string). |
|
532 | # the integer result (as a string). | |
534 | # In case of error, there is a non-empty frame containing the error. |
|
533 | # In case of error, there is a non-empty frame containing the error. | |
535 | r = self._readframed() |
|
534 | r = self._readframed() | |
536 | if r: |
|
535 | if r: | |
537 | return b'', r |
|
536 | return b'', r | |
538 | return self._readframed(), b'' |
|
537 | return self._readframed(), b'' | |
539 |
|
538 | |||
540 | def _calltwowaystream(self, cmd, fp, **args): |
|
539 | def _calltwowaystream(self, cmd, fp, **args): | |
541 | # The server responds with an empty frame if the client should |
|
540 | # The server responds with an empty frame if the client should | |
542 | # continue submitting the payload. |
|
541 | # continue submitting the payload. | |
543 | r = self._call(cmd, **args) |
|
542 | r = self._call(cmd, **args) | |
544 | if r: |
|
543 | if r: | |
545 | # XXX needs to be made better |
|
544 | # XXX needs to be made better | |
546 | raise error.Abort(_(b'unexpected remote reply: %s') % r) |
|
545 | raise error.Abort(_(b'unexpected remote reply: %s') % r) | |
547 |
|
546 | |||
548 | # The payload consists of frames with content followed by an empty |
|
547 | # The payload consists of frames with content followed by an empty | |
549 | # frame. |
|
548 | # frame. | |
550 | for d in iter(lambda: fp.read(4096), b''): |
|
549 | for d in iter(lambda: fp.read(4096), b''): | |
551 | self._writeframed(d) |
|
550 | self._writeframed(d) | |
552 | self._writeframed(b"", flush=True) |
|
551 | self._writeframed(b"", flush=True) | |
553 |
|
552 | |||
554 | return self._pipei |
|
553 | return self._pipei | |
555 |
|
554 | |||
556 | def _getamount(self): |
|
555 | def _getamount(self): | |
557 | l = self._pipei.readline() |
|
556 | l = self._pipei.readline() | |
558 | if l == b'\n': |
|
557 | if l == b'\n': | |
559 | if self._autoreadstderr: |
|
558 | if self._autoreadstderr: | |
560 | self._readerr() |
|
559 | self._readerr() | |
561 | msg = _(b'check previous remote output') |
|
560 | msg = _(b'check previous remote output') | |
562 | self._abort(error.OutOfBandError(hint=msg)) |
|
561 | self._abort(error.OutOfBandError(hint=msg)) | |
563 | if self._autoreadstderr: |
|
562 | if self._autoreadstderr: | |
564 | self._readerr() |
|
563 | self._readerr() | |
565 | try: |
|
564 | try: | |
566 | return int(l) |
|
565 | return int(l) | |
567 | except ValueError: |
|
566 | except ValueError: | |
568 | self._abort(error.ResponseError(_(b"unexpected response:"), l)) |
|
567 | self._abort(error.ResponseError(_(b"unexpected response:"), l)) | |
569 |
|
568 | |||
570 | def _readframed(self): |
|
569 | def _readframed(self): | |
571 | size = self._getamount() |
|
570 | size = self._getamount() | |
572 | if not size: |
|
571 | if not size: | |
573 | return b'' |
|
572 | return b'' | |
574 |
|
573 | |||
575 | return self._pipei.read(size) |
|
574 | return self._pipei.read(size) | |
576 |
|
575 | |||
577 | def _writeframed(self, data, flush=False): |
|
576 | def _writeframed(self, data, flush=False): | |
578 | self._pipeo.write(b"%d\n" % len(data)) |
|
577 | self._pipeo.write(b"%d\n" % len(data)) | |
579 | if data: |
|
578 | if data: | |
580 | self._pipeo.write(data) |
|
579 | self._pipeo.write(data) | |
581 | if flush: |
|
580 | if flush: | |
582 | self._pipeo.flush() |
|
581 | self._pipeo.flush() | |
583 | if self._autoreadstderr: |
|
582 | if self._autoreadstderr: | |
584 | self._readerr() |
|
583 | self._readerr() | |
585 |
|
584 | |||
586 |
|
585 | |||
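``_writeframed`` and ``_readframed`` above implement the simple length-prefixed framing used on this wire: a decimal byte count, a newline, then exactly that many payload bytes, with a zero-length frame acting as a terminator (as in ``_callpush``). A self-contained sketch of the same framing over an in-memory buffer, for illustration only:

    import io

    def writeframed(out, data):
        # Frame = "<length>\n" followed by the payload bytes.
        out.write(b'%d\n' % len(data))
        if data:
            out.write(data)

    def readframed(inp):
        size = int(inp.readline())
        return inp.read(size) if size else b''

    buf = io.BytesIO()
    for chunk in (b'first frame', b'second frame', b''):  # b'' terminates
        writeframed(buf, chunk)

    buf.seek(0)
    while True:
        frame = readframed(buf)
        if not frame:
            break
        print(frame)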
587 | class sshv2peer(sshv1peer): |
|
586 | class sshv2peer(sshv1peer): | |
588 | """A peer that speakers version 2 of the transport protocol.""" |
|
587 | """A peer that speakers version 2 of the transport protocol.""" | |
589 |
|
588 | |||
590 | # Currently version 2 is identical to version 1 post handshake. |
|
589 | # Currently version 2 is identical to version 1 post handshake. | |
591 | # And handshake is performed before the peer is instantiated. So |
|
590 | # And handshake is performed before the peer is instantiated. So | |
592 | # we need no custom code. |
|
591 | # we need no custom code. | |
593 |
|
592 | |||
594 |
|
593 | |||
595 | def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True): |
|
594 | def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True): | |
596 | """Make a peer instance from existing pipes. |
|
595 | """Make a peer instance from existing pipes. | |
597 |
|
596 | |||
598 | ``path`` and ``proc`` are stored on the eventual peer instance and may |
|
597 | ``path`` and ``proc`` are stored on the eventual peer instance and may | |
599 | not be used for anything meaningful. |
|
598 | not be used for anything meaningful. | |
600 |
|
599 | |||
601 | ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the |
|
600 | ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the | |
602 | SSH server's stdio handles. |
|
601 | SSH server's stdio handles. | |
603 |
|
602 | |||
604 | This function is factored out to allow creating peers that don't |
|
603 | This function is factored out to allow creating peers that don't | |
605 | actually spawn a new process. It is useful for starting SSH protocol |
|
604 | actually spawn a new process. It is useful for starting SSH protocol | |
606 | servers and clients via non-standard means, which can be useful for |
|
605 | servers and clients via non-standard means, which can be useful for | |
607 | testing. |
|
606 | testing. | |
608 | """ |
|
607 | """ | |
609 | try: |
|
608 | try: | |
610 | protoname, caps = _performhandshake(ui, stdin, stdout, stderr) |
|
609 | protoname, caps = _performhandshake(ui, stdin, stdout, stderr) | |
611 | except Exception: |
|
610 | except Exception: | |
612 | _cleanuppipes(ui, stdout, stdin, stderr) |
|
611 | _cleanuppipes(ui, stdout, stdin, stderr) | |
613 | raise |
|
612 | raise | |
614 |
|
613 | |||
615 | if protoname == wireprototypes.SSHV1: |
|
614 | if protoname == wireprototypes.SSHV1: | |
616 | return sshv1peer( |
|
615 | return sshv1peer( | |
617 | ui, |
|
616 | ui, | |
618 | path, |
|
617 | path, | |
619 | proc, |
|
618 | proc, | |
620 | stdin, |
|
619 | stdin, | |
621 | stdout, |
|
620 | stdout, | |
622 | stderr, |
|
621 | stderr, | |
623 | caps, |
|
622 | caps, | |
624 | autoreadstderr=autoreadstderr, |
|
623 | autoreadstderr=autoreadstderr, | |
625 | ) |
|
624 | ) | |
626 | elif protoname == wireprototypes.SSHV2: |
|
625 | elif protoname == wireprototypes.SSHV2: | |
627 | return sshv2peer( |
|
626 | return sshv2peer( | |
628 | ui, |
|
627 | ui, | |
629 | path, |
|
628 | path, | |
630 | proc, |
|
629 | proc, | |
631 | stdin, |
|
630 | stdin, | |
632 | stdout, |
|
631 | stdout, | |
633 | stderr, |
|
632 | stderr, | |
634 | caps, |
|
633 | caps, | |
635 | autoreadstderr=autoreadstderr, |
|
634 | autoreadstderr=autoreadstderr, | |
636 | ) |
|
635 | ) | |
637 | else: |
|
636 | else: | |
638 | _cleanuppipes(ui, stdout, stdin, stderr) |
|
637 | _cleanuppipes(ui, stdout, stdin, stderr) | |
639 | raise error.RepoError( |
|
638 | raise error.RepoError( | |
640 | _(b'unknown version of SSH protocol: %s') % protoname |
|
639 | _(b'unknown version of SSH protocol: %s') % protoname | |
641 | ) |
|
640 | ) | |
642 |
|
641 | |||
643 |
|
642 | |||
644 | def instance(ui, path, create, intents=None, createopts=None): |
|
643 | def instance(ui, path, create, intents=None, createopts=None): | |
645 | """Create an SSH peer. |
|
644 | """Create an SSH peer. | |
646 |
|
645 | |||
647 | The returned object conforms to the ``wireprotov1peer.wirepeer`` interface. |
|
646 | The returned object conforms to the ``wireprotov1peer.wirepeer`` interface. | |
648 | """ |
|
647 | """ | |
649 | u = util.url(path, parsequery=False, parsefragment=False) |
|
648 | u = util.url(path, parsequery=False, parsefragment=False) | |
650 | if u.scheme != b'ssh' or not u.host or u.path is None: |
|
649 | if u.scheme != b'ssh' or not u.host or u.path is None: | |
651 | raise error.RepoError(_(b"couldn't parse location %s") % path) |
|
650 | raise error.RepoError(_(b"couldn't parse location %s") % path) | |
652 |
|
651 | |||
653 | util.checksafessh(path) |
|
652 | util.checksafessh(path) | |
654 |
|
653 | |||
655 | if u.passwd is not None: |
|
654 | if u.passwd is not None: | |
656 | raise error.RepoError(_(b'password in URL not supported')) |
|
655 | raise error.RepoError(_(b'password in URL not supported')) | |
657 |
|
656 | |||
658 | sshcmd = ui.config(b'ui', b'ssh') |
|
657 | sshcmd = ui.config(b'ui', b'ssh') | |
659 | remotecmd = ui.config(b'ui', b'remotecmd') |
|
658 | remotecmd = ui.config(b'ui', b'remotecmd') | |
660 | sshaddenv = dict(ui.configitems(b'sshenv')) |
|
659 | sshaddenv = dict(ui.configitems(b'sshenv')) | |
661 | sshenv = procutil.shellenviron(sshaddenv) |
|
660 | sshenv = procutil.shellenviron(sshaddenv) | |
662 | remotepath = u.path or b'.' |
|
661 | remotepath = u.path or b'.' | |
663 |
|
662 | |||
664 | args = procutil.sshargs(sshcmd, u.host, u.user, u.port) |
|
663 | args = procutil.sshargs(sshcmd, u.host, u.user, u.port) | |
665 |
|
664 | |||
666 | if create: |
|
665 | if create: | |
667 | # We /could/ do this, but only if the remote init command knows how to |
|
666 | # We /could/ do this, but only if the remote init command knows how to | |
668 | # handle them. We don't yet make any assumptions about that. And without |
|
667 | # handle them. We don't yet make any assumptions about that. And without | |
669 | # querying the remote, there's no way of knowing if the remote even |
|
668 | # querying the remote, there's no way of knowing if the remote even | |
670 | # supports said requested feature. |
|
669 | # supports said requested feature. | |
671 | if createopts: |
|
670 | if createopts: | |
672 | raise error.RepoError( |
|
671 | raise error.RepoError( | |
673 | _( |
|
672 | _( | |
674 | b'cannot create remote SSH repositories ' |
|
673 | b'cannot create remote SSH repositories ' | |
675 | b'with extra options' |
|
674 | b'with extra options' | |
676 | ) |
|
675 | ) | |
677 | ) |
|
676 | ) | |
678 |
|
677 | |||
679 | cmd = b'%s %s %s' % ( |
|
678 | cmd = b'%s %s %s' % ( | |
680 | sshcmd, |
|
679 | sshcmd, | |
681 | args, |
|
680 | args, | |
682 | procutil.shellquote( |
|
681 | procutil.shellquote( | |
683 | b'%s init %s' |
|
682 | b'%s init %s' | |
684 | % (_serverquote(remotecmd), _serverquote(remotepath)) |
|
683 | % (_serverquote(remotecmd), _serverquote(remotepath)) | |
685 | ), |
|
684 | ), | |
686 | ) |
|
685 | ) | |
687 | ui.debug(b'running %s\n' % cmd) |
|
686 | ui.debug(b'running %s\n' % cmd) | |
688 | res = ui.system(cmd, blockedtag=b'sshpeer', environ=sshenv) |
|
687 | res = ui.system(cmd, blockedtag=b'sshpeer', environ=sshenv) | |
689 | if res != 0: |
|
688 | if res != 0: | |
690 | raise error.RepoError(_(b'could not create remote repo')) |
|
689 | raise error.RepoError(_(b'could not create remote repo')) | |
691 |
|
690 | |||
692 | proc, stdin, stdout, stderr = _makeconnection( |
|
691 | proc, stdin, stdout, stderr = _makeconnection( | |
693 | ui, sshcmd, args, remotecmd, remotepath, sshenv |
|
692 | ui, sshcmd, args, remotecmd, remotepath, sshenv | |
694 | ) |
|
693 | ) | |
695 |
|
694 | |||
696 | peer = makepeer(ui, path, proc, stdin, stdout, stderr) |
|
695 | peer = makepeer(ui, path, proc, stdin, stdout, stderr) | |
697 |
|
696 | |||
698 | # Finally, if supported by the server, notify it about our own |
|
697 | # Finally, if supported by the server, notify it about our own | |
699 | # capabilities. |
|
698 | # capabilities. | |
700 | if b'protocaps' in peer.capabilities(): |
|
699 | if b'protocaps' in peer.capabilities(): | |
701 | try: |
|
700 | try: | |
702 | peer._call( |
|
701 | peer._call( | |
703 | b"protocaps", caps=b' '.join(sorted(_clientcapabilities())) |
|
702 | b"protocaps", caps=b' '.join(sorted(_clientcapabilities())) | |
704 | ) |
|
703 | ) | |
705 | except IOError: |
|
704 | except IOError: | |
706 | peer._cleanup() |
|
705 | peer._cleanup() | |
707 | raise error.RepoError(_(b'capability exchange failed')) |
|
706 | raise error.RepoError(_(b'capability exchange failed')) | |
708 |
|
707 | |||
709 | return peer |
|
708 | return peer |
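When asked to create a remote repository, ``instance`` above nests two levels of quoting: the inner ``hg init <path>`` string is quoted for the remote shell, and the result is quoted again so it travels as a single argument on the local ssh command line. A rough illustration using ``shlex.quote`` as a stand-in for ``procutil.shellquote`` and ``_serverquote``; the host and path are made up:

    import shlex

    sshcmd = 'ssh'
    sshargs = 'user@example.com'        # what procutil.sshargs() would produce
    remotecmd, remotepath = 'hg', 'projects/repo'

    inner = '%s init %s' % (shlex.quote(remotecmd), shlex.quote(remotepath))
    cmd = '%s %s %s' % (sshcmd, sshargs, shlex.quote(inner))
    print(cmd)  # ssh user@example.com 'hg init projects/repo'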
@@ -1,679 +1,677 b'' | |||||
1 | # procutil.py - utility for managing processes and executable environment |
|
1 | # procutil.py - utility for managing processes and executable environment | |
2 | # |
|
2 | # | |
3 | # Copyright 2005 K. Thananchayan <thananck@yahoo.com> |
|
3 | # Copyright 2005 K. Thananchayan <thananck@yahoo.com> | |
4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
6 | # |
|
6 | # | |
7 | # This software may be used and distributed according to the terms of the |
|
7 | # This software may be used and distributed according to the terms of the | |
8 | # GNU General Public License version 2 or any later version. |
|
8 | # GNU General Public License version 2 or any later version. | |
9 |
|
9 | |||
10 | from __future__ import absolute_import |
|
10 | from __future__ import absolute_import | |
11 |
|
11 | |||
12 | import contextlib |
|
12 | import contextlib | |
13 | import errno |
|
13 | import errno | |
14 | import io |
|
14 | import io | |
15 | import os |
|
15 | import os | |
16 | import signal |
|
16 | import signal | |
17 | import subprocess |
|
17 | import subprocess | |
18 | import sys |
|
18 | import sys | |
19 | import threading |
|
19 | import threading | |
20 | import time |
|
20 | import time | |
21 |
|
21 | |||
22 | from ..i18n import _ |
|
22 | from ..i18n import _ | |
23 | from ..pycompat import ( |
|
23 | from ..pycompat import ( | |
24 | getattr, |
|
24 | getattr, | |
25 | open, |
|
25 | open, | |
26 | ) |
|
26 | ) | |
27 |
|
27 | |||
28 | from .. import ( |
|
28 | from .. import ( | |
29 | encoding, |
|
29 | encoding, | |
30 | error, |
|
30 | error, | |
31 | policy, |
|
31 | policy, | |
32 | pycompat, |
|
32 | pycompat, | |
33 | ) |
|
33 | ) | |
34 |
|
34 | |||
35 | # Import like this to keep import-checker happy |
|
35 | # Import like this to keep import-checker happy | |
36 | from ..utils import resourceutil |
|
36 | from ..utils import resourceutil | |
37 |
|
37 | |||
38 | osutil = policy.importmod('osutil') |
|
38 | osutil = policy.importmod('osutil') | |
39 |
|
39 | |||
40 | stderr = pycompat.stderr |
|
40 | stderr = pycompat.stderr | |
41 | stdin = pycompat.stdin |
|
41 | stdin = pycompat.stdin | |
42 | stdout = pycompat.stdout |
|
42 | stdout = pycompat.stdout | |
43 |
|
43 | |||
44 |
|
44 | |||
45 | def isatty(fp): |
|
45 | def isatty(fp): | |
46 | try: |
|
46 | try: | |
47 | return fp.isatty() |
|
47 | return fp.isatty() | |
48 | except AttributeError: |
|
48 | except AttributeError: | |
49 | return False |
|
49 | return False | |
50 |
|
50 | |||
51 |
|
51 | |||
52 | # glibc determines buffering on first write to stdout - if we replace a TTY |
|
52 | # glibc determines buffering on first write to stdout - if we replace a TTY | |
53 | # destined stdout with a pipe destined stdout (e.g. pager), we want line |
|
53 | # destined stdout with a pipe destined stdout (e.g. pager), we want line | |
54 | # buffering (or unbuffered, on Windows) |
|
54 | # buffering (or unbuffered, on Windows) | |
55 | if isatty(stdout): |
|
55 | if isatty(stdout): | |
56 | if pycompat.iswindows: |
|
56 | if pycompat.iswindows: | |
57 | # Windows doesn't support line buffering |
|
57 | # Windows doesn't support line buffering | |
58 | stdout = os.fdopen(stdout.fileno(), 'wb', 0) |
|
58 | stdout = os.fdopen(stdout.fileno(), 'wb', 0) | |
59 | elif not pycompat.ispy3: |
|
59 | elif not pycompat.ispy3: | |
60 | # on Python 3, stdout (sys.stdout.buffer) is already line buffered and |
|
60 | # on Python 3, stdout (sys.stdout.buffer) is already line buffered and | |
61 | # buffering=1 is not handled in binary mode |
|
61 | # buffering=1 is not handled in binary mode | |
62 | stdout = os.fdopen(stdout.fileno(), 'wb', 1) |
|
62 | stdout = os.fdopen(stdout.fileno(), 'wb', 1) | |
63 |
|
63 | |||
64 | if pycompat.iswindows: |
|
64 | if pycompat.iswindows: | |
65 | from .. import windows as platform |
|
65 | from .. import windows as platform | |
66 |
|
66 | |||
67 | stdout = platform.winstdout(stdout) |
|
67 | stdout = platform.winstdout(stdout) | |
68 | else: |
|
68 | else: | |
69 | from .. import posix as platform |
|
69 | from .. import posix as platform | |
70 |
|
70 | |||
71 | findexe = platform.findexe |
|
71 | findexe = platform.findexe | |
72 | _gethgcmd = platform.gethgcmd |
|
72 | _gethgcmd = platform.gethgcmd | |
73 | getuser = platform.getuser |
|
73 | getuser = platform.getuser | |
74 | getpid = os.getpid |
|
74 | getpid = os.getpid | |
75 | hidewindow = platform.hidewindow |
|
75 | hidewindow = platform.hidewindow | |
76 | quotecommand = platform.quotecommand |
|
|||
77 | readpipe = platform.readpipe |
|
76 | readpipe = platform.readpipe | |
78 | setbinary = platform.setbinary |
|
77 | setbinary = platform.setbinary | |
79 | setsignalhandler = platform.setsignalhandler |
|
78 | setsignalhandler = platform.setsignalhandler | |
80 | shellquote = platform.shellquote |
|
79 | shellquote = platform.shellquote | |
81 | shellsplit = platform.shellsplit |
|
80 | shellsplit = platform.shellsplit | |
82 | spawndetached = platform.spawndetached |
|
81 | spawndetached = platform.spawndetached | |
83 | sshargs = platform.sshargs |
|
82 | sshargs = platform.sshargs | |
84 | testpid = platform.testpid |
|
83 | testpid = platform.testpid | |
85 |
|
84 | |||
86 | try: |
|
85 | try: | |
87 | setprocname = osutil.setprocname |
|
86 | setprocname = osutil.setprocname | |
88 | except AttributeError: |
|
87 | except AttributeError: | |
89 | pass |
|
88 | pass | |
90 | try: |
|
89 | try: | |
91 | unblocksignal = osutil.unblocksignal |
|
90 | unblocksignal = osutil.unblocksignal | |
92 | except AttributeError: |
|
91 | except AttributeError: | |
93 | pass |
|
92 | pass | |
94 |
|
93 | |||
95 | closefds = pycompat.isposix |
|
94 | closefds = pycompat.isposix | |
96 |
|
95 | |||
97 |
|
96 | |||
98 | def explainexit(code): |
|
97 | def explainexit(code): | |
99 | """return a message describing a subprocess status |
|
98 | """return a message describing a subprocess status | |
100 | (codes from kill are negative - not os.system/wait encoding)""" |
|
99 | (codes from kill are negative - not os.system/wait encoding)""" | |
101 | if code >= 0: |
|
100 | if code >= 0: | |
102 | return _(b"exited with status %d") % code |
|
101 | return _(b"exited with status %d") % code | |
103 | return _(b"killed by signal %d") % -code |
|
102 | return _(b"killed by signal %d") % -code | |
104 |
|
103 | |||
105 |
|
104 | |||
106 | class _pfile(object): |
|
105 | class _pfile(object): | |
107 | """File-like wrapper for a stream opened by subprocess.Popen()""" |
|
106 | """File-like wrapper for a stream opened by subprocess.Popen()""" | |
108 |
|
107 | |||
109 | def __init__(self, proc, fp): |
|
108 | def __init__(self, proc, fp): | |
110 | self._proc = proc |
|
109 | self._proc = proc | |
111 | self._fp = fp |
|
110 | self._fp = fp | |
112 |
|
111 | |||
113 | def close(self): |
|
112 | def close(self): | |
114 | # unlike os.popen(), this returns an integer in subprocess coding |
|
113 | # unlike os.popen(), this returns an integer in subprocess coding | |
115 | self._fp.close() |
|
114 | self._fp.close() | |
116 | return self._proc.wait() |
|
115 | return self._proc.wait() | |
117 |
|
116 | |||
118 | def __iter__(self): |
|
117 | def __iter__(self): | |
119 | return iter(self._fp) |
|
118 | return iter(self._fp) | |
120 |
|
119 | |||
121 | def __getattr__(self, attr): |
|
120 | def __getattr__(self, attr): | |
122 | return getattr(self._fp, attr) |
|
121 | return getattr(self._fp, attr) | |
123 |
|
122 | |||
124 | def __enter__(self): |
|
123 | def __enter__(self): | |
125 | return self |
|
124 | return self | |
126 |
|
125 | |||
127 | def __exit__(self, exc_type, exc_value, exc_tb): |
|
126 | def __exit__(self, exc_type, exc_value, exc_tb): | |
128 | self.close() |
|
127 | self.close() | |
129 |
|
128 | |||
130 |
|
129 | |||
131 | def popen(cmd, mode=b'rb', bufsize=-1): |
|
130 | def popen(cmd, mode=b'rb', bufsize=-1): | |
132 | if mode == b'rb': |
|
131 | if mode == b'rb': | |
133 | return _popenreader(cmd, bufsize) |
|
132 | return _popenreader(cmd, bufsize) | |
134 | elif mode == b'wb': |
|
133 | elif mode == b'wb': | |
135 | return _popenwriter(cmd, bufsize) |
|
134 | return _popenwriter(cmd, bufsize) | |
136 | raise error.ProgrammingError(b'unsupported mode: %r' % mode) |
|
135 | raise error.ProgrammingError(b'unsupported mode: %r' % mode) | |
137 |
|
136 | |||
138 |
|
137 | |||
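``popen`` wraps the child's pipe in ``_pfile`` so the stream can be used as a context manager and so ``close()`` reports the exit status in ``subprocess`` conventions rather than ``os.popen``'s. A hedged usage sketch, assuming Mercurial is importable and a POSIX shell is available:

    from mercurial.utils import procutil

    # The with-block closes the pipe and waits for the child; the data read
    # before that is the command's stdout.
    with procutil.popen(b'echo hello', b'rb') as fp:
        print(fp.read())  # b'hello\n'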
139 | def _popenreader(cmd, bufsize): |
|
138 | def _popenreader(cmd, bufsize): | |
140 | p = subprocess.Popen( |
|
139 | p = subprocess.Popen( | |
141 | tonativestr(quotecommand(cmd)), |
|
140 | tonativestr(cmd), | |
142 | shell=True, |
|
141 | shell=True, | |
143 | bufsize=bufsize, |
|
142 | bufsize=bufsize, | |
144 | close_fds=closefds, |
|
143 | close_fds=closefds, | |
145 | stdout=subprocess.PIPE, |
|
144 | stdout=subprocess.PIPE, | |
146 | ) |
|
145 | ) | |
147 | return _pfile(p, p.stdout) |
|
146 | return _pfile(p, p.stdout) | |
148 |
|
147 | |||
149 |
|
148 | |||
150 | def _popenwriter(cmd, bufsize): |
|
149 | def _popenwriter(cmd, bufsize): | |
151 | p = subprocess.Popen( |
|
150 | p = subprocess.Popen( | |
152 | tonativestr(quotecommand(cmd)), |
|
151 | tonativestr(cmd), | |
153 | shell=True, |
|
152 | shell=True, | |
154 | bufsize=bufsize, |
|
153 | bufsize=bufsize, | |
155 | close_fds=closefds, |
|
154 | close_fds=closefds, | |
156 | stdin=subprocess.PIPE, |
|
155 | stdin=subprocess.PIPE, | |
157 | ) |
|
156 | ) | |
158 | return _pfile(p, p.stdin) |
|
157 | return _pfile(p, p.stdin) | |
159 |
|
158 | |||
160 |
|
159 | |||
161 | def popen2(cmd, env=None): |
|
160 | def popen2(cmd, env=None): | |
162 | # Setting bufsize to -1 lets the system decide the buffer size. |
|
161 | # Setting bufsize to -1 lets the system decide the buffer size. | |
163 | # The default for bufsize is 0, meaning unbuffered. This leads to |
|
162 | # The default for bufsize is 0, meaning unbuffered. This leads to | |
164 | # poor performance on Mac OS X: http://bugs.python.org/issue4194 |
|
163 | # poor performance on Mac OS X: http://bugs.python.org/issue4194 | |
165 | p = subprocess.Popen( |
|
164 | p = subprocess.Popen( | |
166 | tonativestr(cmd), |
|
165 | tonativestr(cmd), | |
167 | shell=True, |
|
166 | shell=True, | |
168 | bufsize=-1, |
|
167 | bufsize=-1, | |
169 | close_fds=closefds, |
|
168 | close_fds=closefds, | |
170 | stdin=subprocess.PIPE, |
|
169 | stdin=subprocess.PIPE, | |
171 | stdout=subprocess.PIPE, |
|
170 | stdout=subprocess.PIPE, | |
172 | env=tonativeenv(env), |
|
171 | env=tonativeenv(env), | |
173 | ) |
|
172 | ) | |
174 | return p.stdin, p.stdout |
|
173 | return p.stdin, p.stdout | |
175 |
|
174 | |||
176 |
|
175 | |||
177 | def popen3(cmd, env=None): |
|
176 | def popen3(cmd, env=None): | |
178 | stdin, stdout, stderr, p = popen4(cmd, env) |
|
177 | stdin, stdout, stderr, p = popen4(cmd, env) | |
179 | return stdin, stdout, stderr |
|
178 | return stdin, stdout, stderr | |
180 |
|
179 | |||
181 |
|
180 | |||
182 | def popen4(cmd, env=None, bufsize=-1): |
|
181 | def popen4(cmd, env=None, bufsize=-1): | |
183 | p = subprocess.Popen( |
|
182 | p = subprocess.Popen( | |
184 | tonativestr(cmd), |
|
183 | tonativestr(cmd), | |
185 | shell=True, |
|
184 | shell=True, | |
186 | bufsize=bufsize, |
|
185 | bufsize=bufsize, | |
187 | close_fds=closefds, |
|
186 | close_fds=closefds, | |
188 | stdin=subprocess.PIPE, |
|
187 | stdin=subprocess.PIPE, | |
189 | stdout=subprocess.PIPE, |
|
188 | stdout=subprocess.PIPE, | |
190 | stderr=subprocess.PIPE, |
|
189 | stderr=subprocess.PIPE, | |
191 | env=tonativeenv(env), |
|
190 | env=tonativeenv(env), | |
192 | ) |
|
191 | ) | |
193 | return p.stdin, p.stdout, p.stderr, p |
|
192 | return p.stdin, p.stdout, p.stderr, p | |
194 |
|
193 | |||
195 |
|
194 | |||
196 | def pipefilter(s, cmd): |
|
195 | def pipefilter(s, cmd): | |
197 | '''filter string S through command CMD, returning its output''' |
|
196 | '''filter string S through command CMD, returning its output''' | |
198 | p = subprocess.Popen( |
|
197 | p = subprocess.Popen( | |
199 | tonativestr(cmd), |
|
198 | tonativestr(cmd), | |
200 | shell=True, |
|
199 | shell=True, | |
201 | close_fds=closefds, |
|
200 | close_fds=closefds, | |
202 | stdin=subprocess.PIPE, |
|
201 | stdin=subprocess.PIPE, | |
203 | stdout=subprocess.PIPE, |
|
202 | stdout=subprocess.PIPE, | |
204 | ) |
|
203 | ) | |
205 | pout, perr = p.communicate(s) |
|
204 | pout, perr = p.communicate(s) | |
206 | return pout |
|
205 | return pout | |
207 |
|
206 | |||
208 |
|
207 | |||
209 | def tempfilter(s, cmd): |
|
208 | def tempfilter(s, cmd): | |
210 | '''filter string S through a pair of temporary files with CMD. |
|
209 | '''filter string S through a pair of temporary files with CMD. | |
211 | CMD is used as a template to create the real command to be run, |
|
210 | CMD is used as a template to create the real command to be run, | |
212 | with the strings INFILE and OUTFILE replaced by the real names of |
|
211 | with the strings INFILE and OUTFILE replaced by the real names of | |
213 | the temporary files generated.''' |
|
212 | the temporary files generated.''' | |
214 | inname, outname = None, None |
|
213 | inname, outname = None, None | |
215 | try: |
|
214 | try: | |
216 | infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-') |
|
215 | infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-') | |
217 | fp = os.fdopen(infd, 'wb') |
|
216 | fp = os.fdopen(infd, 'wb') | |
218 | fp.write(s) |
|
217 | fp.write(s) | |
219 | fp.close() |
|
218 | fp.close() | |
220 | outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-') |
|
219 | outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-') | |
221 | os.close(outfd) |
|
220 | os.close(outfd) | |
222 | cmd = cmd.replace(b'INFILE', inname) |
|
221 | cmd = cmd.replace(b'INFILE', inname) | |
223 | cmd = cmd.replace(b'OUTFILE', outname) |
|
222 | cmd = cmd.replace(b'OUTFILE', outname) | |
224 | code = system(cmd) |
|
223 | code = system(cmd) | |
225 | if pycompat.sysplatform == b'OpenVMS' and code & 1: |
|
224 | if pycompat.sysplatform == b'OpenVMS' and code & 1: | |
226 | code = 0 |
|
225 | code = 0 | |
227 | if code: |
|
226 | if code: | |
228 | raise error.Abort( |
|
227 | raise error.Abort( | |
229 | _(b"command '%s' failed: %s") % (cmd, explainexit(code)) |
|
228 | _(b"command '%s' failed: %s") % (cmd, explainexit(code)) | |
230 | ) |
|
229 | ) | |
231 | with open(outname, b'rb') as fp: |
|
230 | with open(outname, b'rb') as fp: | |
232 | return fp.read() |
|
231 | return fp.read() | |
233 | finally: |
|
232 | finally: | |
234 | try: |
|
233 | try: | |
235 | if inname: |
|
234 | if inname: | |
236 | os.unlink(inname) |
|
235 | os.unlink(inname) | |
237 | except OSError: |
|
236 | except OSError: | |
238 | pass |
|
237 | pass | |
239 | try: |
|
238 | try: | |
240 | if outname: |
|
239 | if outname: | |
241 | os.unlink(outname) |
|
240 | os.unlink(outname) | |
242 | except OSError: |
|
241 | except OSError: | |
243 | pass |
|
242 | pass | |
244 |
|
243 | |||
245 |
|
244 | |||
246 | _filtertable = { |
|
245 | _filtertable = { | |
247 | b'tempfile:': tempfilter, |
|
246 | b'tempfile:': tempfilter, | |
248 | b'pipe:': pipefilter, |
|
247 | b'pipe:': pipefilter, | |
249 | } |
|
248 | } | |
250 |
|
249 | |||
251 |
|
250 | |||
252 | def filter(s, cmd): |
|
251 | def filter(s, cmd): | |
253 | """filter a string through a command that transforms its input to its |
|
252 | """filter a string through a command that transforms its input to its | |
254 | output""" |
|
253 | output""" | |
255 | for name, fn in pycompat.iteritems(_filtertable): |
|
254 | for name, fn in pycompat.iteritems(_filtertable): | |
256 | if cmd.startswith(name): |
|
255 | if cmd.startswith(name): | |
257 | return fn(s, cmd[len(name) :].lstrip()) |
|
256 | return fn(s, cmd[len(name) :].lstrip()) | |
258 | return pipefilter(s, cmd) |
|
257 | return pipefilter(s, cmd) | |
259 |
|
258 | |||
260 |
|
259 | |||
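``filter`` dispatches on an optional prefix: ``pipe:`` (and the bare form) streams the data through the command's stdin/stdout, while ``tempfile:`` writes the data to INFILE, runs the command, and reads OUTFILE back. A hedged usage sketch, assuming a Unix ``sort`` is on the PATH:

    from mercurial.utils import procutil

    # Streamed through the child's stdin/stdout.
    print(procutil.filter(b'b\na\n', b'pipe: sort'))                       # b'a\nb\n'

    # Round-tripped through temporary files named in the command template.
    print(procutil.filter(b'b\na\n', b'tempfile: sort INFILE > OUTFILE'))  # b'a\nb\n'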
261 | _hgexecutable = None |
|
260 | _hgexecutable = None | |
262 |
|
261 | |||
263 |
|
262 | |||
264 | def hgexecutable(): |
|
263 | def hgexecutable(): | |
265 | """return location of the 'hg' executable. |
|
264 | """return location of the 'hg' executable. | |
266 |
|
265 | |||
267 | Defaults to $HG or 'hg' in the search path. |
|
266 | Defaults to $HG or 'hg' in the search path. | |
268 | """ |
|
267 | """ | |
269 | if _hgexecutable is None: |
|
268 | if _hgexecutable is None: | |
270 | hg = encoding.environ.get(b'HG') |
|
269 | hg = encoding.environ.get(b'HG') | |
271 | mainmod = sys.modules['__main__'] |
|
270 | mainmod = sys.modules['__main__'] | |
272 | if hg: |
|
271 | if hg: | |
273 | _sethgexecutable(hg) |
|
272 | _sethgexecutable(hg) | |
274 | elif resourceutil.mainfrozen(): |
|
273 | elif resourceutil.mainfrozen(): | |
275 | if getattr(sys, 'frozen', None) == 'macosx_app': |
|
274 | if getattr(sys, 'frozen', None) == 'macosx_app': | |
276 | # Env variable set by py2app |
|
275 | # Env variable set by py2app | |
277 | _sethgexecutable(encoding.environ[b'EXECUTABLEPATH']) |
|
276 | _sethgexecutable(encoding.environ[b'EXECUTABLEPATH']) | |
278 | else: |
|
277 | else: | |
279 | _sethgexecutable(pycompat.sysexecutable) |
|
278 | _sethgexecutable(pycompat.sysexecutable) | |
280 | elif ( |
|
279 | elif ( | |
281 | not pycompat.iswindows |
|
280 | not pycompat.iswindows | |
282 | and os.path.basename(getattr(mainmod, '__file__', '')) == 'hg' |
|
281 | and os.path.basename(getattr(mainmod, '__file__', '')) == 'hg' | |
283 | ): |
|
282 | ): | |
284 | _sethgexecutable(pycompat.fsencode(mainmod.__file__)) |
|
283 | _sethgexecutable(pycompat.fsencode(mainmod.__file__)) | |
285 | else: |
|
284 | else: | |
286 | _sethgexecutable( |
|
285 | _sethgexecutable( | |
287 | findexe(b'hg') or os.path.basename(pycompat.sysargv[0]) |
|
286 | findexe(b'hg') or os.path.basename(pycompat.sysargv[0]) | |
288 | ) |
|
287 | ) | |
289 | return _hgexecutable |
|
288 | return _hgexecutable | |
290 |
|
289 | |||
291 |
|
290 | |||
292 | def _sethgexecutable(path): |
|
291 | def _sethgexecutable(path): | |
293 | """set location of the 'hg' executable""" |
|
292 | """set location of the 'hg' executable""" | |
294 | global _hgexecutable |
|
293 | global _hgexecutable | |
295 | _hgexecutable = path |
|
294 | _hgexecutable = path | |
296 |
|
295 | |||
297 |
|
296 | |||
298 | def _testfileno(f, stdf): |
|
297 | def _testfileno(f, stdf): | |
299 | fileno = getattr(f, 'fileno', None) |
|
298 | fileno = getattr(f, 'fileno', None) | |
300 | try: |
|
299 | try: | |
301 | return fileno and fileno() == stdf.fileno() |
|
300 | return fileno and fileno() == stdf.fileno() | |
302 | except io.UnsupportedOperation: |
|
301 | except io.UnsupportedOperation: | |
303 | return False # fileno() raised UnsupportedOperation |
|
302 | return False # fileno() raised UnsupportedOperation | |
304 |
|
303 | |||
305 |
|
304 | |||
306 | def isstdin(f): |
|
305 | def isstdin(f): | |
307 | return _testfileno(f, sys.__stdin__) |
|
306 | return _testfileno(f, sys.__stdin__) | |
308 |
|
307 | |||
309 |
|
308 | |||
310 | def isstdout(f): |
|
309 | def isstdout(f): | |
311 | return _testfileno(f, sys.__stdout__) |
|
310 | return _testfileno(f, sys.__stdout__) | |
312 |
|
311 | |||
313 |
|
312 | |||
314 | def protectstdio(uin, uout): |
|
313 | def protectstdio(uin, uout): | |
315 | """Duplicate streams and redirect original if (uin, uout) are stdio |
|
314 | """Duplicate streams and redirect original if (uin, uout) are stdio | |
316 |
|
315 | |||
317 | If uin is stdin, it's redirected to /dev/null. If uout is stdout, it's |
|
316 | If uin is stdin, it's redirected to /dev/null. If uout is stdout, it's | |
318 | redirected to stderr so the output is still readable. |
|
317 | redirected to stderr so the output is still readable. | |
319 |
|
318 | |||
320 | Returns (fin, fout) which point to the original (uin, uout) fds, but |
|
319 | Returns (fin, fout) which point to the original (uin, uout) fds, but | |
321 | may be a copy of (uin, uout). The returned streams can be considered |
|
320 | may be a copy of (uin, uout). The returned streams can be considered |
322 | "owned" in that print(), exec(), etc. never reach to them. |
|
321 | "owned" in that print(), exec(), etc. never reach to them. | |
323 | """ |
|
322 | """ | |
324 | uout.flush() |
|
323 | uout.flush() | |
325 | fin, fout = uin, uout |
|
324 | fin, fout = uin, uout | |
326 | if _testfileno(uin, stdin): |
|
325 | if _testfileno(uin, stdin): | |
327 | newfd = os.dup(uin.fileno()) |
|
326 | newfd = os.dup(uin.fileno()) | |
328 | nullfd = os.open(os.devnull, os.O_RDONLY) |
|
327 | nullfd = os.open(os.devnull, os.O_RDONLY) | |
329 | os.dup2(nullfd, uin.fileno()) |
|
328 | os.dup2(nullfd, uin.fileno()) | |
330 | os.close(nullfd) |
|
329 | os.close(nullfd) | |
331 | fin = os.fdopen(newfd, 'rb') |
|
330 | fin = os.fdopen(newfd, 'rb') | |
332 | if _testfileno(uout, stdout): |
|
331 | if _testfileno(uout, stdout): | |
333 | newfd = os.dup(uout.fileno()) |
|
332 | newfd = os.dup(uout.fileno()) | |
334 | os.dup2(stderr.fileno(), uout.fileno()) |
|
333 | os.dup2(stderr.fileno(), uout.fileno()) | |
335 | fout = os.fdopen(newfd, 'wb') |
|
334 | fout = os.fdopen(newfd, 'wb') | |
336 | return fin, fout |
|
335 | return fin, fout | |
337 |
|
336 | |||
338 |
|
337 | |||
339 | def restorestdio(uin, uout, fin, fout): |
|
338 | def restorestdio(uin, uout, fin, fout): | |
340 | """Restore (uin, uout) streams from possibly duplicated (fin, fout)""" |
|
339 | """Restore (uin, uout) streams from possibly duplicated (fin, fout)""" | |
341 | uout.flush() |
|
340 | uout.flush() | |
342 | for f, uif in [(fin, uin), (fout, uout)]: |
|
341 | for f, uif in [(fin, uin), (fout, uout)]: | |
343 | if f is not uif: |
|
342 | if f is not uif: | |
344 | os.dup2(f.fileno(), uif.fileno()) |
|
343 | os.dup2(f.fileno(), uif.fileno()) | |
345 | f.close() |
|
344 | f.close() | |
346 |
|
345 | |||
347 |
|
346 | |||
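``protectstdio`` and ``restorestdio`` are meant to bracket a region that must not leak stray prints onto the real stdio, for example while speaking a protocol over stdin/stdout; writes still reach the original descriptors through the returned handles. A hedged sketch of the intended pattern (it temporarily rewires the process's stdio, so treat it as an illustration rather than something to paste into a script):

    from mercurial.utils import procutil

    uin, uout = procutil.stdin, procutil.stdout
    fin, fout = procutil.protectstdio(uin, uout)
    try:
        # Anything printed here now goes to stderr; fout still reaches the
        # original stdout, so protocol payloads stay uncorrupted.
        fout.write(b'payload for the original stdout\n')
        fout.flush()
    finally:
        procutil.restorestdio(uin, uout, fin, fout)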
348 | def shellenviron(environ=None): |
|
347 | def shellenviron(environ=None): | |
349 | """return environ with optional override, useful for shelling out""" |
|
348 | """return environ with optional override, useful for shelling out""" | |
350 |
|
349 | |||
351 | def py2shell(val): |
|
350 | def py2shell(val): | |
352 | """convert python object into string that is useful to shell""" |
|
351 | """convert python object into string that is useful to shell""" | |
353 | if val is None or val is False: |
|
352 | if val is None or val is False: | |
354 | return b'0' |
|
353 | return b'0' | |
355 | if val is True: |
|
354 | if val is True: | |
356 | return b'1' |
|
355 | return b'1' | |
357 | return pycompat.bytestr(val) |
|
356 | return pycompat.bytestr(val) | |
358 |
|
357 | |||
359 | env = dict(encoding.environ) |
|
358 | env = dict(encoding.environ) | |
360 | if environ: |
|
359 | if environ: | |
361 | env.update((k, py2shell(v)) for k, v in pycompat.iteritems(environ)) |
|
360 | env.update((k, py2shell(v)) for k, v in pycompat.iteritems(environ)) | |
362 | env[b'HG'] = hgexecutable() |
|
361 | env[b'HG'] = hgexecutable() | |
363 | return env |
|
362 | return env | |
364 |
|
363 | |||
365 |
|
364 | |||
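``shellenviron`` flattens Python values into strings a hook script can test (None and False become '0', True becomes '1', everything else is stringified) and always injects HG pointing at the current executable. A hedged usage sketch with made-up hook variables:

    from mercurial.utils import procutil

    env = procutil.shellenviron(
        {b'HG_PENDING': True, b'HG_ERROR': None, b'HG_URL': b'ssh://example.com/repo'}
    )
    print(env[b'HG_PENDING'], env[b'HG_ERROR'], env[b'HG_URL'])
    # b'1' b'0' b'ssh://example.com/repo'
    print(b'HG' in env)  # True: the path of the running hg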
366 | if pycompat.iswindows: |
|
365 | if pycompat.iswindows: | |
367 |
|
366 | |||
368 | def shelltonative(cmd, env): |
|
367 | def shelltonative(cmd, env): | |
369 | return platform.shelltocmdexe( # pytype: disable=module-attr |
|
368 | return platform.shelltocmdexe( # pytype: disable=module-attr | |
370 | cmd, shellenviron(env) |
|
369 | cmd, shellenviron(env) | |
371 | ) |
|
370 | ) | |
372 |
|
371 | |||
373 | tonativestr = encoding.strfromlocal |
|
372 | tonativestr = encoding.strfromlocal | |
374 | else: |
|
373 | else: | |
375 |
|
374 | |||
376 | def shelltonative(cmd, env): |
|
375 | def shelltonative(cmd, env): | |
377 | return cmd |
|
376 | return cmd | |
378 |
|
377 | |||
379 | tonativestr = pycompat.identity |
|
378 | tonativestr = pycompat.identity | |
380 |
|
379 | |||
381 |
|
380 | |||
382 | def tonativeenv(env): |
|
381 | def tonativeenv(env): | |
383 | '''convert the environment from bytes to strings suitable for Popen(), etc. |
|
382 | '''convert the environment from bytes to strings suitable for Popen(), etc. | |
384 | ''' |
|
383 | ''' | |
385 | return pycompat.rapply(tonativestr, env) |
|
384 | return pycompat.rapply(tonativestr, env) | |
386 |
|
385 | |||
387 |
|
386 | |||
388 | def system(cmd, environ=None, cwd=None, out=None): |
|
387 | def system(cmd, environ=None, cwd=None, out=None): | |
389 | '''enhanced shell command execution. |
|
388 | '''enhanced shell command execution. | |
390 | run with environment maybe modified, maybe in different dir. |
|
389 | run with environment maybe modified, maybe in different dir. | |
391 |
|
390 | |||
392 | if out is specified, it is assumed to be a file-like object that has a |
|
391 | if out is specified, it is assumed to be a file-like object that has a | |
393 | write() method. stdout and stderr will be redirected to out.''' |
|
392 | write() method. stdout and stderr will be redirected to out.''' | |
394 | try: |
|
393 | try: | |
395 | stdout.flush() |
|
394 | stdout.flush() | |
396 | except Exception: |
|
395 | except Exception: | |
397 | pass |
|
396 | pass | |
398 | cmd = quotecommand(cmd) |
|
|||
399 | env = shellenviron(environ) |
|
397 | env = shellenviron(environ) | |
400 | if out is None or isstdout(out): |
|
398 | if out is None or isstdout(out): | |
401 | rc = subprocess.call( |
|
399 | rc = subprocess.call( | |
402 | tonativestr(cmd), |
|
400 | tonativestr(cmd), | |
403 | shell=True, |
|
401 | shell=True, | |
404 | close_fds=closefds, |
|
402 | close_fds=closefds, | |
405 | env=tonativeenv(env), |
|
403 | env=tonativeenv(env), | |
406 | cwd=pycompat.rapply(tonativestr, cwd), |
|
404 | cwd=pycompat.rapply(tonativestr, cwd), | |
407 | ) |
|
405 | ) | |
408 | else: |
|
406 | else: | |
409 | proc = subprocess.Popen( |
|
407 | proc = subprocess.Popen( | |
410 | tonativestr(cmd), |
|
408 | tonativestr(cmd), | |
411 | shell=True, |
|
409 | shell=True, | |
412 | close_fds=closefds, |
|
410 | close_fds=closefds, | |
413 | env=tonativeenv(env), |
|
411 | env=tonativeenv(env), | |
414 | cwd=pycompat.rapply(tonativestr, cwd), |
|
412 | cwd=pycompat.rapply(tonativestr, cwd), | |
415 | stdout=subprocess.PIPE, |
|
413 | stdout=subprocess.PIPE, | |
416 | stderr=subprocess.STDOUT, |
|
414 | stderr=subprocess.STDOUT, | |
417 | ) |
|
415 | ) | |
418 | for line in iter(proc.stdout.readline, b''): |
|
416 | for line in iter(proc.stdout.readline, b''): | |
419 | out.write(line) |
|
417 | out.write(line) | |
420 | proc.wait() |
|
418 | proc.wait() | |
421 | rc = proc.returncode |
|
419 | rc = proc.returncode | |
422 | if pycompat.sysplatform == b'OpenVMS' and rc & 1: |
|
420 | if pycompat.sysplatform == b'OpenVMS' and rc & 1: | |
423 | rc = 0 |
|
421 | rc = 0 | |
424 | return rc |
|
422 | return rc | |
425 |
|
423 | |||
426 |
|
424 | |||
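``system`` runs the command through the shell and, when ``out`` is a writable object other than the real stdout, collects the child's stdout and stderr into it line by line. A hedged usage sketch, assuming a POSIX shell:

    import io
    from mercurial.utils import procutil

    buf = io.BytesIO()
    rc = procutil.system(b'echo captured; echo diagnostics 1>&2', out=buf)
    print(rc)              # 0 on success
    print(buf.getvalue())  # output of both streams, merged into one pipe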
427 | _is_gui = None |
|
425 | _is_gui = None | |
428 |
|
426 | |||
429 |
|
427 | |||
430 | def _gui(): |
|
428 | def _gui(): | |
431 | '''Are we running in a GUI?''' |
|
429 | '''Are we running in a GUI?''' | |
432 | if pycompat.isdarwin: |
|
430 | if pycompat.isdarwin: | |
433 | if b'SSH_CONNECTION' in encoding.environ: |
|
431 | if b'SSH_CONNECTION' in encoding.environ: | |
434 | # handle SSH access to a box where the user is logged in |
|
432 | # handle SSH access to a box where the user is logged in | |
435 | return False |
|
433 | return False | |
436 | elif getattr(osutil, 'isgui', None): |
|
434 | elif getattr(osutil, 'isgui', None): | |
437 | # check if a CoreGraphics session is available |
|
435 | # check if a CoreGraphics session is available | |
438 | return osutil.isgui() |
|
436 | return osutil.isgui() | |
439 | else: |
|
437 | else: | |
440 | # pure build; use a safe default |
|
438 | # pure build; use a safe default | |
441 | return True |
|
439 | return True | |
442 | else: |
|
440 | else: | |
443 | return pycompat.iswindows or encoding.environ.get(b"DISPLAY") |
|
441 | return pycompat.iswindows or encoding.environ.get(b"DISPLAY") | |
444 |
|
442 | |||
445 |
|
443 | |||
446 | def gui(): |
|
444 | def gui(): | |
447 | global _is_gui |
|
445 | global _is_gui | |
448 | if _is_gui is None: |
|
446 | if _is_gui is None: | |
449 | _is_gui = _gui() |
|
447 | _is_gui = _gui() | |
450 | return _is_gui |
|
448 | return _is_gui | |
451 |
|
449 | |||
452 |
|
450 | |||
453 | def hgcmd(): |
|
451 | def hgcmd(): | |
454 | """Return the command used to execute current hg |
|
452 | """Return the command used to execute current hg | |
455 |
|
453 | |||
456 | This is different from hgexecutable() because on Windows we want |
|
454 | This is different from hgexecutable() because on Windows we want | |
457 | to avoid things opening new shell windows like batch files, so we |
|
455 | to avoid things opening new shell windows like batch files, so we | |
458 | get either the python call or current executable. |
|
456 | get either the python call or current executable. | |
459 | """ |
|
457 | """ | |
460 | if resourceutil.mainfrozen(): |
|
458 | if resourceutil.mainfrozen(): | |
461 | if getattr(sys, 'frozen', None) == 'macosx_app': |
|
459 | if getattr(sys, 'frozen', None) == 'macosx_app': | |
462 | # Env variable set by py2app |
|
460 | # Env variable set by py2app | |
463 | return [encoding.environ[b'EXECUTABLEPATH']] |
|
461 | return [encoding.environ[b'EXECUTABLEPATH']] | |
464 | else: |
|
462 | else: | |
465 | return [pycompat.sysexecutable] |
|
463 | return [pycompat.sysexecutable] | |
466 | return _gethgcmd() |
|
464 | return _gethgcmd() | |
467 |
|
465 | |||
468 |
|
466 | |||
469 | def rundetached(args, condfn): |
|
467 | def rundetached(args, condfn): | |
470 | """Execute the argument list in a detached process. |
|
468 | """Execute the argument list in a detached process. | |
471 |
|
469 | |||
472 | condfn is a callable which is called repeatedly and should return |
|
470 | condfn is a callable which is called repeatedly and should return | |
473 | True once the child process is known to have started successfully. |
|
471 | True once the child process is known to have started successfully. | |
474 | At this point, the child process PID is returned. If the child |
|
472 | At this point, the child process PID is returned. If the child | |
475 | process fails to start or finishes before condfn() evaluates to |
|
473 | process fails to start or finishes before condfn() evaluates to | |
476 | True, return -1. |
|
474 | True, return -1. | |
477 | """ |
|
475 | """ | |
478 | # Windows case is easier because the child process is either |
|
476 | # Windows case is easier because the child process is either | |
479 | # successfully starting and validating the condition or exiting |
|
477 | # successfully starting and validating the condition or exiting | |
480 | # on failure. We just poll on its PID. On Unix, if the child |
|
478 | # on failure. We just poll on its PID. On Unix, if the child | |
481 | # process fails to start, it will be left in a zombie state until |
|
479 | # process fails to start, it will be left in a zombie state until | |
482 | # the parent waits on it, which we cannot do since we expect a long

480 | # the parent waits on it, which we cannot do since we expect a long |
483 | # running process on success. Instead we listen for SIGCHLD telling |
|
481 | # running process on success. Instead we listen for SIGCHLD telling | |
484 | # us our child process terminated. |
|
482 | # us our child process terminated. | |
485 | terminated = set() |
|
483 | terminated = set() | |
486 |
|
484 | |||
487 | def handler(signum, frame): |
|
485 | def handler(signum, frame): | |
488 | terminated.add(os.wait()) |
|
486 | terminated.add(os.wait()) | |
489 |
|
487 | |||
490 | prevhandler = None |
|
488 | prevhandler = None | |
491 | SIGCHLD = getattr(signal, 'SIGCHLD', None) |
|
489 | SIGCHLD = getattr(signal, 'SIGCHLD', None) | |
492 | if SIGCHLD is not None: |
|
490 | if SIGCHLD is not None: | |
493 | prevhandler = signal.signal(SIGCHLD, handler) |
|
491 | prevhandler = signal.signal(SIGCHLD, handler) | |
494 | try: |
|
492 | try: | |
495 | pid = spawndetached(args) |
|
493 | pid = spawndetached(args) | |
496 | while not condfn(): |
|
494 | while not condfn(): | |
497 | if (pid in terminated or not testpid(pid)) and not condfn(): |
|
495 | if (pid in terminated or not testpid(pid)) and not condfn(): | |
498 | return -1 |
|
496 | return -1 | |
499 | time.sleep(0.1) |
|
497 | time.sleep(0.1) | |
500 | return pid |
|
498 | return pid | |
501 | finally: |
|
499 | finally: | |
502 | if prevhandler is not None: |
|
500 | if prevhandler is not None: | |
503 | signal.signal(signal.SIGCHLD, prevhandler) |
|
501 | signal.signal(signal.SIGCHLD, prevhandler) | |
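A hedged sketch of the condfn protocol described in the docstring: spawn a command detached and treat the appearance of a file the child creates as the readiness signal. The import path, command, and pidfile name are assumptions for illustration:

# Sketch only: wait for a detached child to signal readiness via a pidfile.
import os
from mercurial.utils import procutil

def start_daemon(args, pidfile):
    # condfn is polled every 0.1s; rundetached() returns the child PID once
    # condfn() is true, or -1 if the child exits before that happens.
    pid = procutil.rundetached(args, lambda: os.path.exists(pidfile))
    if pid < 0:
        raise RuntimeError('detached process failed to start')
    return pid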
504 |
|
502 | |||
505 |
|
503 | |||
506 | @contextlib.contextmanager |
|
504 | @contextlib.contextmanager | |
507 | def uninterruptible(warn): |
|
505 | def uninterruptible(warn): | |
508 | """Inhibit SIGINT handling on a region of code. |
|
506 | """Inhibit SIGINT handling on a region of code. | |
509 |
|
507 | |||
510 | Note that if this is called in a non-main thread, it turns into a no-op. |
|
508 | Note that if this is called in a non-main thread, it turns into a no-op. | |
511 |
|
509 | |||
512 | Args: |
|
510 | Args: | |
513 | warn: A callable which takes no arguments, and returns True if the |
|
511 | warn: A callable which takes no arguments, and returns True if the | |
514 | previous signal handling should be restored. |
|
512 | previous signal handling should be restored. | |
515 | """ |
|
513 | """ | |
516 |
|
514 | |||
517 | oldsiginthandler = [signal.getsignal(signal.SIGINT)] |
|
515 | oldsiginthandler = [signal.getsignal(signal.SIGINT)] | |
518 | shouldbail = [] |
|
516 | shouldbail = [] | |
519 |
|
517 | |||
520 | def disabledsiginthandler(*args): |
|
518 | def disabledsiginthandler(*args): | |
521 | if warn(): |
|
519 | if warn(): | |
522 | signal.signal(signal.SIGINT, oldsiginthandler[0]) |
|
520 | signal.signal(signal.SIGINT, oldsiginthandler[0]) | |
523 | del oldsiginthandler[0] |
|
521 | del oldsiginthandler[0] | |
524 | shouldbail.append(True) |
|
522 | shouldbail.append(True) | |
525 |
|
523 | |||
526 | try: |
|
524 | try: | |
527 | try: |
|
525 | try: | |
528 | signal.signal(signal.SIGINT, disabledsiginthandler) |
|
526 | signal.signal(signal.SIGINT, disabledsiginthandler) | |
529 | except ValueError: |
|
527 | except ValueError: | |
530 | # wrong thread, oh well, we tried |
|
528 | # wrong thread, oh well, we tried | |
531 | del oldsiginthandler[0] |
|
529 | del oldsiginthandler[0] | |
532 | yield |
|
530 | yield | |
533 | finally: |
|
531 | finally: | |
534 | if oldsiginthandler: |
|
532 | if oldsiginthandler: | |
535 | signal.signal(signal.SIGINT, oldsiginthandler[0]) |
|
533 | signal.signal(signal.SIGINT, oldsiginthandler[0]) | |
536 | if shouldbail: |
|
534 | if shouldbail: | |
537 | raise KeyboardInterrupt |
|
535 | raise KeyboardInterrupt | |
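A small usage sketch, assuming this module is importable as mercurial.utils.procutil: the first Ctrl-C only calls warn() and defers the interrupt, with KeyboardInterrupt raised when the block exits; returning True from warn() additionally restores the previous SIGINT handler so a second Ctrl-C interrupts immediately.

# Sketch only: shield a critical section from the first Ctrl-C.
from mercurial.utils import procutil

def _warn():
    print('press Ctrl-C again to interrupt immediately')
    return True  # restore the original SIGINT handler

def critical_step(do_work):
    with procutil.uninterruptible(_warn):
        do_work()  # not aborted by the first SIGINT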
538 |
|
536 | |||
539 |
|
537 | |||
540 | if pycompat.iswindows: |
|
538 | if pycompat.iswindows: | |
541 | # no fork on Windows, but we can create a detached process |
|
539 | # no fork on Windows, but we can create a detached process | |
542 | # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx |
|
540 | # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx | |
543 | # No stdlib constant exists for this value |
|
541 | # No stdlib constant exists for this value | |
544 | DETACHED_PROCESS = 0x00000008 |
|
542 | DETACHED_PROCESS = 0x00000008 | |
545 | # Following creation flags might create a console GUI window. |
|
543 | # Following creation flags might create a console GUI window. | |
546 | # Using subprocess.CREATE_NEW_CONSOLE might help.

544 | # Using subprocess.CREATE_NEW_CONSOLE might help. |
547 | # See https://phab.mercurial-scm.org/D1701 for discussion |
|
545 | # See https://phab.mercurial-scm.org/D1701 for discussion | |
548 | _creationflags = ( |
|
546 | _creationflags = ( | |
549 | DETACHED_PROCESS |
|
547 | DETACHED_PROCESS | |
550 | | subprocess.CREATE_NEW_PROCESS_GROUP # pytype: disable=module-attr |
|
548 | | subprocess.CREATE_NEW_PROCESS_GROUP # pytype: disable=module-attr | |
551 | ) |
|
549 | ) | |
552 |
|
550 | |||
553 | def runbgcommand( |
|
551 | def runbgcommand( | |
554 | script, |
|
552 | script, | |
555 | env, |
|
553 | env, | |
556 | shell=False, |
|
554 | shell=False, | |
557 | stdout=None, |
|
555 | stdout=None, | |
558 | stderr=None, |
|
556 | stderr=None, | |
559 | ensurestart=True, |
|
557 | ensurestart=True, | |
560 | record_wait=None, |
|
558 | record_wait=None, | |
561 | ): |
|
559 | ): | |
562 | '''Spawn a command without waiting for it to finish.''' |
|
560 | '''Spawn a command without waiting for it to finish.''' | |
563 | # we can't use close_fds *and* redirect stdin. I'm not sure that we |
|
561 | # we can't use close_fds *and* redirect stdin. I'm not sure that we | |
564 | # need to because the detached process has no console connection. |
|
562 | # need to because the detached process has no console connection. | |
565 | p = subprocess.Popen( |
|
563 | p = subprocess.Popen( | |
566 | tonativestr(script), |
|
564 | tonativestr(script), | |
567 | shell=shell, |
|
565 | shell=shell, | |
568 | env=tonativeenv(env), |
|
566 | env=tonativeenv(env), | |
569 | close_fds=True, |
|
567 | close_fds=True, | |
570 | creationflags=_creationflags, |
|
568 | creationflags=_creationflags, | |
571 | stdout=stdout, |
|
569 | stdout=stdout, | |
572 | stderr=stderr, |
|
570 | stderr=stderr, | |
573 | ) |
|
571 | ) | |
574 | if record_wait is not None: |
|
572 | if record_wait is not None: | |
575 | record_wait(p.wait) |
|
573 | record_wait(p.wait) | |
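For reference, a hedged sketch of how this Windows variant might be invoked; the command is a placeholder. Because of DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP above, the child gets no console window and is not part of the parent's Ctrl-C group.

# Sketch only (Windows): fire-and-forget a helper command.
from mercurial import encoding
from mercurial.utils import procutil

cmd = b'hg debugupdatecaches -R C:\\repo'   # placeholder command line
procutil.runbgcommand(cmd, encoding.environ.copy(), shell=True)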
576 |
|
574 | |||
577 |
|
575 | |||
578 | else: |
|
576 | else: | |
579 |
|
577 | |||
580 | def runbgcommand( |
|
578 | def runbgcommand( | |
581 | cmd, |
|
579 | cmd, | |
582 | env, |
|
580 | env, | |
583 | shell=False, |
|
581 | shell=False, | |
584 | stdout=None, |
|
582 | stdout=None, | |
585 | stderr=None, |
|
583 | stderr=None, | |
586 | ensurestart=True, |
|
584 | ensurestart=True, | |
587 | record_wait=None, |
|
585 | record_wait=None, | |
588 | ): |
|
586 | ): | |
589 | '''Spawn a command without waiting for it to finish. |
|
587 | '''Spawn a command without waiting for it to finish. | |
590 |
|
588 | |||
591 |
|
589 | |||
592 | When `record_wait` is not None, the spawned process will not be fully |
|
590 | When `record_wait` is not None, the spawned process will not be fully | |
593 | detached and the `record_wait` argument will be called with the

591 | detached and the `record_wait` argument will be called with the |
594 | `Subprocess.wait` function for the spawned process. This is mostly |
|
592 | `Subprocess.wait` function for the spawned process. This is mostly | |
595 | useful for developers who need to make sure the spawned process

593 | useful for developers who need to make sure the spawned process |
596 | finished before a certain point. (e.g. when writing tests)'''

594 | finished before a certain point. (e.g. when writing tests)''' |
597 | if pycompat.isdarwin: |
|
595 | if pycompat.isdarwin: | |
598 | # avoid crash in CoreFoundation in case another thread |
|
596 | # avoid crash in CoreFoundation in case another thread | |
599 | # calls gui() while we're calling fork(). |
|
597 | # calls gui() while we're calling fork(). | |
600 | gui() |
|
598 | gui() | |
601 |
|
599 | |||
602 | # double-fork to completely detach from the parent process |
|
600 | # double-fork to completely detach from the parent process | |
603 | # based on http://code.activestate.com/recipes/278731 |
|
601 | # based on http://code.activestate.com/recipes/278731 | |
604 | if record_wait is None: |
|
602 | if record_wait is None: | |
605 | pid = os.fork() |
|
603 | pid = os.fork() | |
606 | if pid: |
|
604 | if pid: | |
607 | if not ensurestart: |
|
605 | if not ensurestart: | |
608 | # Even though we're not waiting on the child process, |
|
606 | # Even though we're not waiting on the child process, | |
609 | # we still must call waitpid() on it at some point so |
|
607 | # we still must call waitpid() on it at some point so | |
610 | # it's not a zombie/defunct. This is especially relevant for |
|
608 | # it's not a zombie/defunct. This is especially relevant for | |
611 | # chg since the parent process won't die anytime soon. |
|
609 | # chg since the parent process won't die anytime soon. | |
612 | # We use a thread to make the overhead tiny. |
|
610 | # We use a thread to make the overhead tiny. | |
613 | def _do_wait(): |
|
611 | def _do_wait(): | |
614 | os.waitpid(pid, 0) |
|
612 | os.waitpid(pid, 0) | |
615 |
|
613 | |||
616 | threading.Thread(target=_do_wait, daemon=True).start() |
|
614 | threading.Thread(target=_do_wait, daemon=True).start() | |
617 | return |
|
615 | return | |
618 | # Parent process |
|
616 | # Parent process | |
619 | (_pid, status) = os.waitpid(pid, 0) |
|
617 | (_pid, status) = os.waitpid(pid, 0) | |
620 | if os.WIFEXITED(status): |
|
618 | if os.WIFEXITED(status): | |
621 | returncode = os.WEXITSTATUS(status) |
|
619 | returncode = os.WEXITSTATUS(status) | |
622 | else: |
|
620 | else: | |
623 | returncode = -(os.WTERMSIG(status)) |
|
621 | returncode = -(os.WTERMSIG(status)) | |
624 | if returncode != 0: |
|
622 | if returncode != 0: | |
625 | # The child process's return code is 0 on success, an errno |
|
623 | # The child process's return code is 0 on success, an errno | |
626 | # value on failure, or 255 if we don't have a valid errno |
|
624 | # value on failure, or 255 if we don't have a valid errno | |
627 | # value. |
|
625 | # value. | |
628 | # |
|
626 | # | |
629 | # (It would be slightly nicer to return the full exception info |
|
627 | # (It would be slightly nicer to return the full exception info | |
630 | # over a pipe as the subprocess module does. For now it |
|
628 | # over a pipe as the subprocess module does. For now it | |
631 | # doesn't seem worth adding that complexity here, though.) |
|
629 | # doesn't seem worth adding that complexity here, though.) | |
632 | if returncode == 255: |
|
630 | if returncode == 255: | |
633 | returncode = errno.EINVAL |
|
631 | returncode = errno.EINVAL | |
634 | raise OSError( |
|
632 | raise OSError( | |
635 | returncode, |
|
633 | returncode, | |
636 | b'error running %r: %s' |
|
634 | b'error running %r: %s' | |
637 | % (cmd, os.strerror(returncode)), |
|
635 | % (cmd, os.strerror(returncode)), | |
638 | ) |
|
636 | ) | |
639 | return |
|
637 | return | |
640 |
|
638 | |||
641 | returncode = 255 |
|
639 | returncode = 255 | |
642 | try: |
|
640 | try: | |
643 | if record_wait is None: |
|
641 | if record_wait is None: | |
644 | # Start a new session |
|
642 | # Start a new session | |
645 | os.setsid() |
|
643 | os.setsid() | |
646 |
|
644 | |||
647 | stdin = open(os.devnull, b'r') |
|
645 | stdin = open(os.devnull, b'r') | |
648 | if stdout is None: |
|
646 | if stdout is None: | |
649 | stdout = open(os.devnull, b'w') |
|
647 | stdout = open(os.devnull, b'w') | |
650 | if stderr is None: |
|
648 | if stderr is None: | |
651 | stderr = open(os.devnull, b'w') |
|
649 | stderr = open(os.devnull, b'w') | |
652 |
|
650 | |||
653 | # connect stdin to devnull to make sure the subprocess can't |
|
651 | # connect stdin to devnull to make sure the subprocess can't | |
654 | # muck up that stream for mercurial. |
|
652 | # muck up that stream for mercurial. | |
655 | p = subprocess.Popen( |
|
653 | p = subprocess.Popen( | |
656 | cmd, |
|
654 | cmd, | |
657 | shell=shell, |
|
655 | shell=shell, | |
658 | env=env, |
|
656 | env=env, | |
659 | close_fds=True, |
|
657 | close_fds=True, | |
660 | stdin=stdin, |
|
658 | stdin=stdin, | |
661 | stdout=stdout, |
|
659 | stdout=stdout, | |
662 | stderr=stderr, |
|
660 | stderr=stderr, | |
663 | ) |
|
661 | ) | |
664 | if record_wait is not None: |
|
662 | if record_wait is not None: | |
665 | record_wait(p.wait) |
|
663 | record_wait(p.wait) | |
666 | returncode = 0 |
|
664 | returncode = 0 | |
667 | except EnvironmentError as ex: |
|
665 | except EnvironmentError as ex: | |
668 | returncode = ex.errno & 0xFF |
|
666 | returncode = ex.errno & 0xFF | |
669 | if returncode == 0: |
|
667 | if returncode == 0: | |
670 | # This shouldn't happen, but just in case make sure the |
|
668 | # This shouldn't happen, but just in case make sure the | |
671 | # return code is never 0 here. |
|
669 | # return code is never 0 here. | |
672 | returncode = 255 |
|
670 | returncode = 255 | |
673 | except Exception: |
|
671 | except Exception: | |
674 | returncode = 255 |
|
672 | returncode = 255 | |
675 | finally: |
|
673 | finally: | |
676 | # mission accomplished, this child needs to exit and not |
|
674 | # mission accomplished, this child needs to exit and not | |
677 | # continue the hg process here. |
|
675 | # continue the hg process here. | |
678 | if record_wait is None: |
|
676 | if record_wait is None: | |
679 | os._exit(returncode) |
|
677 | os._exit(returncode) |
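A hedged sketch of the record_wait hook described in the docstring, which keeps the child joinable and is handy in tests that must not race with the background command; the command and import path are assumptions:

# Sketch only: capture the child's wait() so a test can join it later.
from mercurial import encoding
from mercurial.utils import procutil

waiters = []
procutil.runbgcommand(
    [b'sleep', b'1'],            # placeholder command (list, so shell=False)
    encoding.environ.copy(),
    ensurestart=True,
    record_wait=waiters.append,  # receives the Popen.wait callable
)
for wait in waiters:
    wait()                       # block until the background child exits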
@@ -1,680 +1,675 b'' | |||||
1 | # windows.py - Windows utility function implementations for Mercurial |
|
1 | # windows.py - Windows utility function implementations for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import errno |
|
10 | import errno | |
11 | import getpass |
|
11 | import getpass | |
12 | import msvcrt |
|
12 | import msvcrt | |
13 | import os |
|
13 | import os | |
14 | import re |
|
14 | import re | |
15 | import stat |
|
15 | import stat | |
16 | import string |
|
16 | import string | |
17 | import sys |
|
17 | import sys | |
18 |
|
18 | |||
19 | from .i18n import _ |
|
19 | from .i18n import _ | |
20 | from .pycompat import getattr |
|
20 | from .pycompat import getattr | |
21 | from . import ( |
|
21 | from . import ( | |
22 | encoding, |
|
22 | encoding, | |
23 | error, |
|
23 | error, | |
24 | policy, |
|
24 | policy, | |
25 | pycompat, |
|
25 | pycompat, | |
26 | win32, |
|
26 | win32, | |
27 | ) |
|
27 | ) | |
28 |
|
28 | |||
29 | try: |
|
29 | try: | |
30 | import _winreg as winreg # pytype: disable=import-error |
|
30 | import _winreg as winreg # pytype: disable=import-error | |
31 |
|
31 | |||
32 | winreg.CloseKey |
|
32 | winreg.CloseKey | |
33 | except ImportError: |
|
33 | except ImportError: | |
34 | # py2 only |
|
34 | # py2 only | |
35 | import winreg # pytype: disable=import-error |
|
35 | import winreg # pytype: disable=import-error | |
36 |
|
36 | |||
37 | osutil = policy.importmod('osutil') |
|
37 | osutil = policy.importmod('osutil') | |
38 |
|
38 | |||
39 | getfsmountpoint = win32.getvolumename |
|
39 | getfsmountpoint = win32.getvolumename | |
40 | getfstype = win32.getfstype |
|
40 | getfstype = win32.getfstype | |
41 | getuser = win32.getuser |
|
41 | getuser = win32.getuser | |
42 | hidewindow = win32.hidewindow |
|
42 | hidewindow = win32.hidewindow | |
43 | makedir = win32.makedir |
|
43 | makedir = win32.makedir | |
44 | nlinks = win32.nlinks |
|
44 | nlinks = win32.nlinks | |
45 | oslink = win32.oslink |
|
45 | oslink = win32.oslink | |
46 | samedevice = win32.samedevice |
|
46 | samedevice = win32.samedevice | |
47 | samefile = win32.samefile |
|
47 | samefile = win32.samefile | |
48 | setsignalhandler = win32.setsignalhandler |
|
48 | setsignalhandler = win32.setsignalhandler | |
49 | spawndetached = win32.spawndetached |
|
49 | spawndetached = win32.spawndetached | |
50 | split = os.path.split |
|
50 | split = os.path.split | |
51 | testpid = win32.testpid |
|
51 | testpid = win32.testpid | |
52 | unlink = win32.unlink |
|
52 | unlink = win32.unlink | |
53 |
|
53 | |||
54 | umask = 0o022 |
|
54 | umask = 0o022 | |
55 |
|
55 | |||
56 |
|
56 | |||
57 | class mixedfilemodewrapper(object): |
|
57 | class mixedfilemodewrapper(object): | |
58 | """Wraps a file handle when it is opened in read/write mode. |
|
58 | """Wraps a file handle when it is opened in read/write mode. | |
59 |
|
59 | |||
60 | fopen() and fdopen() on Windows have a specific-to-Windows requirement |
|
60 | fopen() and fdopen() on Windows have a specific-to-Windows requirement | |
61 | that files opened with mode r+, w+, or a+ make a call to a file positioning |
|
61 | that files opened with mode r+, w+, or a+ make a call to a file positioning | |
62 | function when switching between reads and writes. Without this extra call, |
|
62 | function when switching between reads and writes. Without this extra call, | |
63 | Python will raise a not very intuitive "IOError: [Errno 0] Error." |
|
63 | Python will raise a not very intuitive "IOError: [Errno 0] Error." | |
64 |
|
64 | |||
65 | This class wraps posixfile instances when the file is opened in read/write |
|
65 | This class wraps posixfile instances when the file is opened in read/write | |
66 | mode and automatically adds checks or inserts appropriate file positioning |
|
66 | mode and automatically adds checks or inserts appropriate file positioning | |
67 | calls when necessary. |
|
67 | calls when necessary. | |
68 | """ |
|
68 | """ | |
69 |
|
69 | |||
70 | OPNONE = 0 |
|
70 | OPNONE = 0 | |
71 | OPREAD = 1 |
|
71 | OPREAD = 1 | |
72 | OPWRITE = 2 |
|
72 | OPWRITE = 2 | |
73 |
|
73 | |||
74 | def __init__(self, fp): |
|
74 | def __init__(self, fp): | |
75 | object.__setattr__(self, '_fp', fp) |
|
75 | object.__setattr__(self, '_fp', fp) | |
76 | object.__setattr__(self, '_lastop', 0) |
|
76 | object.__setattr__(self, '_lastop', 0) | |
77 |
|
77 | |||
78 | def __enter__(self): |
|
78 | def __enter__(self): | |
79 | self._fp.__enter__() |
|
79 | self._fp.__enter__() | |
80 | return self |
|
80 | return self | |
81 |
|
81 | |||
82 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
82 | def __exit__(self, exc_type, exc_val, exc_tb): | |
83 | self._fp.__exit__(exc_type, exc_val, exc_tb) |
|
83 | self._fp.__exit__(exc_type, exc_val, exc_tb) | |
84 |
|
84 | |||
85 | def __getattr__(self, name): |
|
85 | def __getattr__(self, name): | |
86 | return getattr(self._fp, name) |
|
86 | return getattr(self._fp, name) | |
87 |
|
87 | |||
88 | def __setattr__(self, name, value): |
|
88 | def __setattr__(self, name, value): | |
89 | return self._fp.__setattr__(name, value) |
|
89 | return self._fp.__setattr__(name, value) | |
90 |
|
90 | |||
91 | def _noopseek(self): |
|
91 | def _noopseek(self): | |
92 | self._fp.seek(0, os.SEEK_CUR) |
|
92 | self._fp.seek(0, os.SEEK_CUR) | |
93 |
|
93 | |||
94 | def seek(self, *args, **kwargs): |
|
94 | def seek(self, *args, **kwargs): | |
95 | object.__setattr__(self, '_lastop', self.OPNONE) |
|
95 | object.__setattr__(self, '_lastop', self.OPNONE) | |
96 | return self._fp.seek(*args, **kwargs) |
|
96 | return self._fp.seek(*args, **kwargs) | |
97 |
|
97 | |||
98 | def write(self, d): |
|
98 | def write(self, d): | |
99 | if self._lastop == self.OPREAD: |
|
99 | if self._lastop == self.OPREAD: | |
100 | self._noopseek() |
|
100 | self._noopseek() | |
101 |
|
101 | |||
102 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
102 | object.__setattr__(self, '_lastop', self.OPWRITE) | |
103 | return self._fp.write(d) |
|
103 | return self._fp.write(d) | |
104 |
|
104 | |||
105 | def writelines(self, *args, **kwargs): |
|
105 | def writelines(self, *args, **kwargs): | |
106 | if self._lastop == self.OPREAD: |
|
106 | if self._lastop == self.OPREAD: | |
107 | self._noopseek()

107 | self._noopseek() |
108 |
|
108 | |||
109 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
109 | object.__setattr__(self, '_lastop', self.OPWRITE) | |
110 | return self._fp.writelines(*args, **kwargs) |
|
110 | return self._fp.writelines(*args, **kwargs) | |
111 |
|
111 | |||
112 | def read(self, *args, **kwargs): |
|
112 | def read(self, *args, **kwargs): | |
113 | if self._lastop == self.OPWRITE: |
|
113 | if self._lastop == self.OPWRITE: | |
114 | self._noopseek() |
|
114 | self._noopseek() | |
115 |
|
115 | |||
116 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
116 | object.__setattr__(self, '_lastop', self.OPREAD) | |
117 | return self._fp.read(*args, **kwargs) |
|
117 | return self._fp.read(*args, **kwargs) | |
118 |
|
118 | |||
119 | def readline(self, *args, **kwargs): |
|
119 | def readline(self, *args, **kwargs): | |
120 | if self._lastop == self.OPWRITE: |
|
120 | if self._lastop == self.OPWRITE: | |
121 | self._noopseek() |
|
121 | self._noopseek() | |
122 |
|
122 | |||
123 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
123 | object.__setattr__(self, '_lastop', self.OPREAD) | |
124 | return self._fp.readline(*args, **kwargs) |
|
124 | return self._fp.readline(*args, **kwargs) | |
125 |
|
125 | |||
126 | def readlines(self, *args, **kwargs): |
|
126 | def readlines(self, *args, **kwargs): | |
127 | if self._lastop == self.OPWRITE: |
|
127 | if self._lastop == self.OPWRITE: | |
128 | self._noopseek() |
|
128 | self._noopseek() | |
129 |
|
129 | |||
130 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
130 | object.__setattr__(self, '_lastop', self.OPREAD) | |
131 | return self._fp.readlines(*args, **kwargs) |
|
131 | return self._fp.readlines(*args, **kwargs) | |
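The wrapper above automates the file-positioning rule that Windows C stdio imposes on r+/w+/a+ handles. A standalone sketch of that rule in plain CPython (no Mercurial imports), showing the no-op seek that must separate a read from the next write:

# Sketch: the no-op seek the wrapper inserts between reads and writes.
import os
import tempfile

with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as fp:
    fp.write(b'hello world')
    fp.seek(0)
    fp.read(5)               # last operation was a read...
    fp.seek(0, os.SEEK_CUR)  # ...reposition before switching to writes
    fp.write(b' again')      # without the seek this can fail on Windows
os.unlink(fp.name)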
132 |
|
132 | |||
133 |
|
133 | |||
134 | class fdproxy(object): |
|
134 | class fdproxy(object): | |
135 | """Wraps osutil.posixfile() to override the name attribute to reflect the |
|
135 | """Wraps osutil.posixfile() to override the name attribute to reflect the | |
136 | underlying file name. |
|
136 | underlying file name. | |
137 | """ |
|
137 | """ | |
138 |
|
138 | |||
139 | def __init__(self, name, fp): |
|
139 | def __init__(self, name, fp): | |
140 | self.name = name |
|
140 | self.name = name | |
141 | self._fp = fp |
|
141 | self._fp = fp | |
142 |
|
142 | |||
143 | def __enter__(self): |
|
143 | def __enter__(self): | |
144 | self._fp.__enter__() |
|
144 | self._fp.__enter__() | |
145 | # Return this wrapper for the context manager so that the name is |
|
145 | # Return this wrapper for the context manager so that the name is | |
146 | # still available. |
|
146 | # still available. | |
147 | return self |
|
147 | return self | |
148 |
|
148 | |||
149 | def __exit__(self, exc_type, exc_value, traceback): |
|
149 | def __exit__(self, exc_type, exc_value, traceback): | |
150 | self._fp.__exit__(exc_type, exc_value, traceback) |
|
150 | self._fp.__exit__(exc_type, exc_value, traceback) | |
151 |
|
151 | |||
152 | def __iter__(self): |
|
152 | def __iter__(self): | |
153 | return iter(self._fp) |
|
153 | return iter(self._fp) | |
154 |
|
154 | |||
155 | def __getattr__(self, name): |
|
155 | def __getattr__(self, name): | |
156 | return getattr(self._fp, name) |
|
156 | return getattr(self._fp, name) | |
157 |
|
157 | |||
158 |
|
158 | |||
159 | def posixfile(name, mode=b'r', buffering=-1): |
|
159 | def posixfile(name, mode=b'r', buffering=-1): | |
160 | '''Open a file with even more POSIX-like semantics''' |
|
160 | '''Open a file with even more POSIX-like semantics''' | |
161 | try: |
|
161 | try: | |
162 | fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError |
|
162 | fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError | |
163 |
|
163 | |||
164 | # PyFile_FromFd() ignores the name, and seems to report fp.name as the |
|
164 | # PyFile_FromFd() ignores the name, and seems to report fp.name as the | |
165 | # underlying file descriptor. |
|
165 | # underlying file descriptor. | |
166 | if pycompat.ispy3: |
|
166 | if pycompat.ispy3: | |
167 | fp = fdproxy(name, fp) |
|
167 | fp = fdproxy(name, fp) | |
168 |
|
168 | |||
169 | # The position when opening in append mode is implementation defined, so |
|
169 | # The position when opening in append mode is implementation defined, so | |
170 | # make it consistent with other platforms, which position at EOF. |
|
170 | # make it consistent with other platforms, which position at EOF. | |
171 | if b'a' in mode: |
|
171 | if b'a' in mode: | |
172 | fp.seek(0, os.SEEK_END) |
|
172 | fp.seek(0, os.SEEK_END) | |
173 |
|
173 | |||
174 | if b'+' in mode: |
|
174 | if b'+' in mode: | |
175 | return mixedfilemodewrapper(fp) |
|
175 | return mixedfilemodewrapper(fp) | |
176 |
|
176 | |||
177 | return fp |
|
177 | return fp | |
178 | except WindowsError as err: |
|
178 | except WindowsError as err: | |
179 | # convert to a friendlier exception |
|
179 | # convert to a friendlier exception | |
180 | raise IOError( |
|
180 | raise IOError( | |
181 | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) |
|
181 | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | |
182 | ) |
|
182 | ) | |
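A hedged usage sketch, assuming the module is reachable as mercurial.windows on a Windows installation: a '+' mode returns the handle wrapped in mixedfilemodewrapper, and 'a' modes are explicitly positioned at EOF; the file name and header value are made up.

# Sketch only (Windows): read-modify-write through posixfile().
from mercurial import windows

with windows.posixfile(b'data.bin', b'r+b') as fp:  # placeholder file name
    if fp.read(4) == b'HGXX':                       # hypothetical header
        fp.write(b'MARK')  # wrapper adds the required no-op seek first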
183 |
|
183 | |||
184 |
|
184 | |||
185 | # may be wrapped by win32mbcs extension |
|
185 | # may be wrapped by win32mbcs extension | |
186 | listdir = osutil.listdir |
|
186 | listdir = osutil.listdir | |
187 |
|
187 | |||
188 |
|
188 | |||
189 | class winstdout(object): |
|
189 | class winstdout(object): | |
190 | '''stdout on windows misbehaves if sent through a pipe''' |
|
190 | '''stdout on windows misbehaves if sent through a pipe''' | |
191 |
|
191 | |||
192 | def __init__(self, fp): |
|
192 | def __init__(self, fp): | |
193 | self.fp = fp |
|
193 | self.fp = fp | |
194 |
|
194 | |||
195 | def __getattr__(self, key): |
|
195 | def __getattr__(self, key): | |
196 | return getattr(self.fp, key) |
|
196 | return getattr(self.fp, key) | |
197 |
|
197 | |||
198 | def close(self): |
|
198 | def close(self): | |
199 | try: |
|
199 | try: | |
200 | self.fp.close() |
|
200 | self.fp.close() | |
201 | except IOError: |
|
201 | except IOError: | |
202 | pass |
|
202 | pass | |
203 |
|
203 | |||
204 | def write(self, s): |
|
204 | def write(self, s): | |
205 | try: |
|
205 | try: | |
206 | # This is a workaround for the "Not enough space" error on

206 | # This is a workaround for the "Not enough space" error on |
207 | # writing a large amount of data to the console.

207 | # writing a large amount of data to the console. |
208 | limit = 16000 |
|
208 | limit = 16000 | |
209 | l = len(s) |
|
209 | l = len(s) | |
210 | start = 0 |
|
210 | start = 0 | |
211 | self.softspace = 0 |
|
211 | self.softspace = 0 | |
212 | while start < l: |
|
212 | while start < l: | |
213 | end = start + limit |
|
213 | end = start + limit | |
214 | self.fp.write(s[start:end]) |
|
214 | self.fp.write(s[start:end]) | |
215 | start = end |
|
215 | start = end | |
216 | except IOError as inst: |
|
216 | except IOError as inst: | |
217 | if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst): |
|
217 | if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst): | |
218 | raise |
|
218 | raise | |
219 | self.close() |
|
219 | self.close() | |
220 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
220 | raise IOError(errno.EPIPE, 'Broken pipe') | |
221 |
|
221 | |||
222 | def flush(self): |
|
222 | def flush(self): | |
223 | try: |
|
223 | try: | |
224 | return self.fp.flush() |
|
224 | return self.fp.flush() | |
225 | except IOError as inst: |
|
225 | except IOError as inst: | |
226 | if not win32.lasterrorwaspipeerror(inst): |
|
226 | if not win32.lasterrorwaspipeerror(inst): | |
227 | raise |
|
227 | raise | |
228 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
228 | raise IOError(errno.EPIPE, 'Broken pipe') | |
229 |
|
229 | |||
230 |
|
230 | |||
231 | def openhardlinks(): |
|
231 | def openhardlinks(): | |
232 | return True |
|
232 | return True | |
233 |
|
233 | |||
234 |
|
234 | |||
235 | def parsepatchoutput(output_line): |
|
235 | def parsepatchoutput(output_line): | |
236 | """parses the output produced by patch and returns the filename""" |
|
236 | """parses the output produced by patch and returns the filename""" | |
237 | pf = output_line[14:] |
|
237 | pf = output_line[14:] | |
238 | if pf[0] == b'`': |
|
238 | if pf[0] == b'`': | |
239 | pf = pf[1:-1] # Remove the quotes |
|
239 | pf = pf[1:-1] # Remove the quotes | |
240 | return pf |
|
240 | return pf | |
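A worked example of the slicing above: the first 14 bytes are the literal "patching file " prefix printed by patch, and a leading backquote means the name was quoted.

# Worked example for parsepatchoutput(); plain slicing, nothing to import.
line = b"patching file `dir/hello.c'"
pf = line[14:]           # -> b"`dir/hello.c'"
if pf[0:1] == b'`':
    pf = pf[1:-1]        # -> b"dir/hello.c"
assert pf == b'dir/hello.c'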
241 |
|
241 | |||
242 |
|
242 | |||
243 | def sshargs(sshcmd, host, user, port): |
|
243 | def sshargs(sshcmd, host, user, port): | |
244 | '''Build argument list for ssh or Plink''' |
|
244 | '''Build argument list for ssh or Plink''' | |
245 | pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p' |
|
245 | pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p' | |
246 | args = user and (b"%s@%s" % (user, host)) or host |
|
246 | args = user and (b"%s@%s" % (user, host)) or host | |
247 | if args.startswith(b'-') or args.startswith(b'/'): |
|
247 | if args.startswith(b'-') or args.startswith(b'/'): | |
248 | raise error.Abort( |
|
248 | raise error.Abort( | |
249 | _(b'illegal ssh hostname or username starting with - or /: %s') |
|
249 | _(b'illegal ssh hostname or username starting with - or /: %s') | |
250 | % args |
|
250 | % args | |
251 | ) |
|
251 | ) | |
252 | args = shellquote(args) |
|
252 | args = shellquote(args) | |
253 | if port: |
|
253 | if port: | |
254 | args = b'%s %s %s' % (pflag, shellquote(port), args) |
|
254 | args = b'%s %s %s' % (pflag, shellquote(port), args) | |
255 | return args |
|
255 | return args | |
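Two illustrative calls showing the flag selection above (host, user, and port are made-up values): plink gets -P while OpenSSH gets -p, and shellquote() below quotes the user@host form because of the '@'.

# Sketch only, assuming mercurial.windows is importable; made-up values.
from mercurial import windows

print(windows.sshargs(b'ssh', b'example.com', None, b'2222'))
# -> b'-p 2222 example.com'
print(windows.sshargs(b'plink.exe', b'example.com', b'hguser', b'2222'))
# -> b'-P 2222 "hguser@example.com"'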
256 |
|
256 | |||
257 |
|
257 | |||
258 | def setflags(f, l, x): |
|
258 | def setflags(f, l, x): | |
259 | pass |
|
259 | pass | |
260 |
|
260 | |||
261 |
|
261 | |||
262 | def copymode(src, dst, mode=None, enforcewritable=False): |
|
262 | def copymode(src, dst, mode=None, enforcewritable=False): | |
263 | pass |
|
263 | pass | |
264 |
|
264 | |||
265 |
|
265 | |||
266 | def checkexec(path): |
|
266 | def checkexec(path): | |
267 | return False |
|
267 | return False | |
268 |
|
268 | |||
269 |
|
269 | |||
270 | def checklink(path): |
|
270 | def checklink(path): | |
271 | return False |
|
271 | return False | |
272 |
|
272 | |||
273 |
|
273 | |||
274 | def setbinary(fd): |
|
274 | def setbinary(fd): | |
275 | # When run without console, pipes may expose invalid |
|
275 | # When run without console, pipes may expose invalid | |
276 | # fileno(), usually set to -1. |
|
276 | # fileno(), usually set to -1. | |
277 | fno = getattr(fd, 'fileno', None) |
|
277 | fno = getattr(fd, 'fileno', None) | |
278 | if fno is not None and fno() >= 0: |
|
278 | if fno is not None and fno() >= 0: | |
279 | msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr |
|
279 | msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr | |
280 |
|
280 | |||
281 |
|
281 | |||
282 | def pconvert(path): |
|
282 | def pconvert(path): | |
283 | return path.replace(pycompat.ossep, b'/') |
|
283 | return path.replace(pycompat.ossep, b'/') | |
284 |
|
284 | |||
285 |
|
285 | |||
286 | def localpath(path): |
|
286 | def localpath(path): | |
287 | return path.replace(b'/', b'\\') |
|
287 | return path.replace(b'/', b'\\') | |
288 |
|
288 | |||
289 |
|
289 | |||
290 | def normpath(path): |
|
290 | def normpath(path): | |
291 | return pconvert(os.path.normpath(path)) |
|
291 | return pconvert(os.path.normpath(path)) | |
292 |
|
292 | |||
293 |
|
293 | |||
294 | def normcase(path): |
|
294 | def normcase(path): | |
295 | return encoding.upper(path) # NTFS compares via upper() |
|
295 | return encoding.upper(path) # NTFS compares via upper() | |
296 |
|
296 | |||
297 |
|
297 | |||
298 | # see posix.py for definitions |
|
298 | # see posix.py for definitions | |
299 | normcasespec = encoding.normcasespecs.upper |
|
299 | normcasespec = encoding.normcasespecs.upper | |
300 | normcasefallback = encoding.upperfallback |
|
300 | normcasefallback = encoding.upperfallback | |
301 |
|
301 | |||
302 |
|
302 | |||
303 | def samestat(s1, s2): |
|
303 | def samestat(s1, s2): | |
304 | return False |
|
304 | return False | |
305 |
|
305 | |||
306 |
|
306 | |||
307 | def shelltocmdexe(path, env): |
|
307 | def shelltocmdexe(path, env): | |
308 | r"""Convert shell variables in the form $var and ${var} inside ``path`` |
|
308 | r"""Convert shell variables in the form $var and ${var} inside ``path`` | |
309 | to %var% form. Existing Windows style variables are left unchanged. |
|
309 | to %var% form. Existing Windows style variables are left unchanged. | |
310 |
|
310 | |||
311 | The variables are limited to the given environment. Unknown variables are |
|
311 | The variables are limited to the given environment. Unknown variables are | |
312 | left unchanged. |
|
312 | left unchanged. | |
313 |
|
313 | |||
314 | >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'} |
|
314 | >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'} | |
315 | >>> # Only valid values are expanded |
|
315 | >>> # Only valid values are expanded | |
316 | >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%', |
|
316 | >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%', | |
317 | ... e) |
|
317 | ... e) | |
318 | 'cmd %var1% %var2% %var3% $missing ${missing} %missing%' |
|
318 | 'cmd %var1% %var2% %var3% $missing ${missing} %missing%' | |
319 | >>> # Single quote prevents expansion, as does \$ escaping |
|
319 | >>> # Single quote prevents expansion, as does \$ escaping | |
320 | >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e) |
|
320 | >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e) | |
321 | 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\' |
|
321 | 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\' | |
322 | >>> # $$ is not special. %% is not special either, but can be the end and |
|
322 | >>> # $$ is not special. %% is not special either, but can be the end and | |
323 | >>> # start of consecutive variables |
|
323 | >>> # start of consecutive variables | |
324 | >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e) |
|
324 | >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e) | |
325 | 'cmd $$ %% %var1%%var2%' |
|
325 | 'cmd $$ %% %var1%%var2%' | |
326 | >>> # No double substitution |
|
326 | >>> # No double substitution | |
327 | >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'}) |
|
327 | >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'}) | |
328 | '%var1% %var1%' |
|
328 | '%var1% %var1%' | |
329 | >>> # Tilde expansion |
|
329 | >>> # Tilde expansion | |
330 | >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {}) |
|
330 | >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {}) | |
331 | '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/' |
|
331 | '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/' | |
332 | """ |
|
332 | """ | |
333 | if not any(c in path for c in b"$'~"): |
|
333 | if not any(c in path for c in b"$'~"): | |
334 | return path |
|
334 | return path | |
335 |
|
335 | |||
336 | varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-' |
|
336 | varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-' | |
337 |
|
337 | |||
338 | res = b'' |
|
338 | res = b'' | |
339 | index = 0 |
|
339 | index = 0 | |
340 | pathlen = len(path) |
|
340 | pathlen = len(path) | |
341 | while index < pathlen: |
|
341 | while index < pathlen: | |
342 | c = path[index : index + 1] |
|
342 | c = path[index : index + 1] | |
343 | if c == b'\'': # no expansion within single quotes |
|
343 | if c == b'\'': # no expansion within single quotes | |
344 | path = path[index + 1 :] |
|
344 | path = path[index + 1 :] | |
345 | pathlen = len(path) |
|
345 | pathlen = len(path) | |
346 | try: |
|
346 | try: | |
347 | index = path.index(b'\'') |
|
347 | index = path.index(b'\'') | |
348 | res += b'"' + path[:index] + b'"' |
|
348 | res += b'"' + path[:index] + b'"' | |
349 | except ValueError: |
|
349 | except ValueError: | |
350 | res += c + path |
|
350 | res += c + path | |
351 | index = pathlen - 1 |
|
351 | index = pathlen - 1 | |
352 | elif c == b'%': # variable |
|
352 | elif c == b'%': # variable | |
353 | path = path[index + 1 :] |
|
353 | path = path[index + 1 :] | |
354 | pathlen = len(path) |
|
354 | pathlen = len(path) | |
355 | try: |
|
355 | try: | |
356 | index = path.index(b'%') |
|
356 | index = path.index(b'%') | |
357 | except ValueError: |
|
357 | except ValueError: | |
358 | res += b'%' + path |
|
358 | res += b'%' + path | |
359 | index = pathlen - 1 |
|
359 | index = pathlen - 1 | |
360 | else: |
|
360 | else: | |
361 | var = path[:index] |
|
361 | var = path[:index] | |
362 | res += b'%' + var + b'%' |
|
362 | res += b'%' + var + b'%' | |
363 | elif c == b'$': # variable |
|
363 | elif c == b'$': # variable | |
364 | if path[index + 1 : index + 2] == b'{': |
|
364 | if path[index + 1 : index + 2] == b'{': | |
365 | path = path[index + 2 :] |
|
365 | path = path[index + 2 :] | |
366 | pathlen = len(path) |
|
366 | pathlen = len(path) | |
367 | try: |
|
367 | try: | |
368 | index = path.index(b'}') |
|
368 | index = path.index(b'}') | |
369 | var = path[:index] |
|
369 | var = path[:index] | |
370 |
|
370 | |||
371 | # See below for why empty variables are handled specially |
|
371 | # See below for why empty variables are handled specially | |
372 | if env.get(var, b'') != b'': |
|
372 | if env.get(var, b'') != b'': | |
373 | res += b'%' + var + b'%' |
|
373 | res += b'%' + var + b'%' | |
374 | else: |
|
374 | else: | |
375 | res += b'${' + var + b'}' |
|
375 | res += b'${' + var + b'}' | |
376 | except ValueError: |
|
376 | except ValueError: | |
377 | res += b'${' + path |
|
377 | res += b'${' + path | |
378 | index = pathlen - 1 |
|
378 | index = pathlen - 1 | |
379 | else: |
|
379 | else: | |
380 | var = b'' |
|
380 | var = b'' | |
381 | index += 1 |
|
381 | index += 1 | |
382 | c = path[index : index + 1] |
|
382 | c = path[index : index + 1] | |
383 | while c != b'' and c in varchars: |
|
383 | while c != b'' and c in varchars: | |
384 | var += c |
|
384 | var += c | |
385 | index += 1 |
|
385 | index += 1 | |
386 | c = path[index : index + 1] |
|
386 | c = path[index : index + 1] | |
387 | # Some variables (like HG_OLDNODE) may be defined, but have an |
|
387 | # Some variables (like HG_OLDNODE) may be defined, but have an | |
388 | # empty value. Those need to be skipped because when spawning |
|
388 | # empty value. Those need to be skipped because when spawning | |
389 | # cmd.exe to run the hook, it doesn't replace %VAR% for an empty |
|
389 | # cmd.exe to run the hook, it doesn't replace %VAR% for an empty | |
390 | # VAR, and that really confuses things like revset expressions. |
|
390 | # VAR, and that really confuses things like revset expressions. | |
391 | # OTOH, if it's left in Unix format and the hook runs sh.exe, it |
|
391 | # OTOH, if it's left in Unix format and the hook runs sh.exe, it | |
392 | # will substitute to an empty string, and everything is happy. |
|
392 | # will substitute to an empty string, and everything is happy. | |
393 | if env.get(var, b'') != b'': |
|
393 | if env.get(var, b'') != b'': | |
394 | res += b'%' + var + b'%' |
|
394 | res += b'%' + var + b'%' | |
395 | else: |
|
395 | else: | |
396 | res += b'$' + var |
|
396 | res += b'$' + var | |
397 |
|
397 | |||
398 | if c != b'': |
|
398 | if c != b'': | |
399 | index -= 1 |
|
399 | index -= 1 | |
400 | elif ( |
|
400 | elif ( | |
401 | c == b'~' |
|
401 | c == b'~' | |
402 | and index + 1 < pathlen |
|
402 | and index + 1 < pathlen | |
403 | and path[index + 1 : index + 2] in (b'\\', b'/') |
|
403 | and path[index + 1 : index + 2] in (b'\\', b'/') | |
404 | ): |
|
404 | ): | |
405 | res += b"%USERPROFILE%" |
|
405 | res += b"%USERPROFILE%" | |
406 | elif ( |
|
406 | elif ( | |
407 | c == b'\\' |
|
407 | c == b'\\' | |
408 | and index + 1 < pathlen |
|
408 | and index + 1 < pathlen | |
409 | and path[index + 1 : index + 2] in (b'$', b'~') |
|
409 | and path[index + 1 : index + 2] in (b'$', b'~') | |
410 | ): |
|
410 | ): | |
411 | # Skip '\', but only if it is escaping $ or ~ |
|
411 | # Skip '\', but only if it is escaping $ or ~ | |
412 | res += path[index + 1 : index + 2] |
|
412 | res += path[index + 1 : index + 2] | |
413 | index += 1 |
|
413 | index += 1 | |
414 | else: |
|
414 | else: | |
415 | res += c |
|
415 | res += c | |
416 |
|
416 | |||
417 | index += 1 |
|
417 | index += 1 | |
418 | return res |
|
418 | return res | |
419 |
|
419 | |||
420 |
|
420 | |||
421 | # A sequence of backslashes is special iff it precedes a double quote: |
|
421 | # A sequence of backslashes is special iff it precedes a double quote: | |
422 | # - if there's an even number of backslashes, the double quote is not |
|
422 | # - if there's an even number of backslashes, the double quote is not | |
423 | # quoted (i.e. it ends the quoted region) |
|
423 | # quoted (i.e. it ends the quoted region) | |
424 | # - if there's an odd number of backslashes, the double quote is quoted |
|
424 | # - if there's an odd number of backslashes, the double quote is quoted | |
425 | # - in both cases, every pair of backslashes is unquoted into a single |
|
425 | # - in both cases, every pair of backslashes is unquoted into a single | |
426 | # backslash |
|
426 | # backslash | |
427 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) |
|
427 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) | |
428 | # So, to quote a string, we must surround it in double quotes, double |
|
428 | # So, to quote a string, we must surround it in double quotes, double | |
429 | # the number of backslashes that precede double quotes and add another |
|
429 | # the number of backslashes that precede double quotes and add another | |
430 | # backslash before every double quote (being careful with the double |
|
430 | # backslash before every double quote (being careful with the double | |
431 | # quote we've appended to the end) |
|
431 | # quote we've appended to the end) | |
432 | _quotere = None |
|
432 | _quotere = None | |
433 | _needsshellquote = None |
|
433 | _needsshellquote = None | |
434 |
|
434 | |||
435 |
|
435 | |||
436 | def shellquote(s): |
|
436 | def shellquote(s): | |
437 | r""" |
|
437 | r""" | |
438 | >>> shellquote(br'C:\Users\xyz') |
|
438 | >>> shellquote(br'C:\Users\xyz') | |
439 | '"C:\\Users\\xyz"' |
|
439 | '"C:\\Users\\xyz"' | |
440 | >>> shellquote(br'C:\Users\xyz/mixed') |
|
440 | >>> shellquote(br'C:\Users\xyz/mixed') | |
441 | '"C:\\Users\\xyz/mixed"' |
|
441 | '"C:\\Users\\xyz/mixed"' | |
442 | >>> # Would be safe not to quote too, since it is all double backslashes |
|
442 | >>> # Would be safe not to quote too, since it is all double backslashes | |
443 | >>> shellquote(br'C:\\Users\\xyz') |
|
443 | >>> shellquote(br'C:\\Users\\xyz') | |
444 | '"C:\\\\Users\\\\xyz"' |
|
444 | '"C:\\\\Users\\\\xyz"' | |
445 | >>> # But this must be quoted |
|
445 | >>> # But this must be quoted | |
446 | >>> shellquote(br'C:\\Users\\xyz/abc') |
|
446 | >>> shellquote(br'C:\\Users\\xyz/abc') | |
447 | '"C:\\\\Users\\\\xyz/abc"' |
|
447 | '"C:\\\\Users\\\\xyz/abc"' | |
448 | """ |
|
448 | """ | |
449 | global _quotere |
|
449 | global _quotere | |
450 | if _quotere is None: |
|
450 | if _quotere is None: | |
451 | _quotere = re.compile(br'(\\*)("|\\$)') |
|
451 | _quotere = re.compile(br'(\\*)("|\\$)') | |
452 | global _needsshellquote |
|
452 | global _needsshellquote | |
453 | if _needsshellquote is None: |
|
453 | if _needsshellquote is None: | |
454 | # ":" is also treated as "safe character", because it is used as a part |
|
454 | # ":" is also treated as "safe character", because it is used as a part | |
455 | # of path name on Windows. "\" is also part of a path name, but isn't |
|
455 | # of path name on Windows. "\" is also part of a path name, but isn't | |
456 | # safe because shlex.split() (kind of) treats it as an escape char and |
|
456 | # safe because shlex.split() (kind of) treats it as an escape char and | |
457 | # drops it. It will leave the next character, even if it is another |
|
457 | # drops it. It will leave the next character, even if it is another | |
458 | # "\". |
|
458 | # "\". | |
459 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search |
|
459 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search | |
460 | if s and not _needsshellquote(s) and not _quotere.search(s): |
|
460 | if s and not _needsshellquote(s) and not _quotere.search(s): | |
461 | # "s" shouldn't have to be quoted |
|
461 | # "s" shouldn't have to be quoted | |
462 | return s |
|
462 | return s | |
463 | return b'"%s"' % _quotere.sub(br'\1\1\\\2', s) |
|
463 | return b'"%s"' % _quotere.sub(br'\1\1\\\2', s) | |
464 |
|
464 | |||
465 |
|
465 | |||
466 | def _unquote(s): |
|
466 | def _unquote(s): | |
467 | if s.startswith(b'"') and s.endswith(b'"'): |
|
467 | if s.startswith(b'"') and s.endswith(b'"'): | |
468 | return s[1:-1] |
|
468 | return s[1:-1] | |
469 | return s |
|
469 | return s | |
470 |
|
470 | |||
471 |
|
471 | |||
472 | def shellsplit(s): |
|
472 | def shellsplit(s): | |
473 | """Parse a command string in cmd.exe way (best-effort)""" |
|
473 | """Parse a command string in cmd.exe way (best-effort)""" | |
474 | return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) |
|
474 | return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) | |
475 |
|
475 | |||
476 |
|
476 | |||
477 | def quotecommand(cmd): |
|
|||
478 | """Build a command string suitable for os.popen* calls.""" |
|
|||
479 | return cmd |
|
|||
480 |
|
||||
481 |
|
||||
482 | # if you change this stub into a real check, please try to implement the |
|
477 | # if you change this stub into a real check, please try to implement the | |
483 | # username and groupname functions above, too. |
|
478 | # username and groupname functions above, too. | |
484 | def isowner(st): |
|
479 | def isowner(st): | |
485 | return True |
|
480 | return True | |
486 |
|
481 | |||
487 |
|
482 | |||
488 | def findexe(command): |
|
483 | def findexe(command): | |
489 | '''Find executable for command searching like cmd.exe does. |
|
484 | '''Find executable for command searching like cmd.exe does. | |
490 | If command is a basename then PATH is searched for command. |
|
485 | If command is a basename then PATH is searched for command. | |
491 | PATH isn't searched if command is an absolute or relative path. |
|
486 | PATH isn't searched if command is an absolute or relative path. | |
492 | An extension from PATHEXT is found and added if not present. |
|
487 | An extension from PATHEXT is found and added if not present. | |
493 | If command isn't found None is returned.''' |
|
488 | If command isn't found None is returned.''' | |
494 | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') |
|
489 | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') | |
495 | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] |
|
490 | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] | |
496 | if os.path.splitext(command)[1].lower() in pathexts: |
|
491 | if os.path.splitext(command)[1].lower() in pathexts: | |
497 | pathexts = [b''] |
|
492 | pathexts = [b''] | |
498 |
|
493 | |||
499 | def findexisting(pathcommand): |
|
494 | def findexisting(pathcommand): | |
500 | """Will append extension (if needed) and return existing file""" |
|
495 | """Will append extension (if needed) and return existing file""" | |
501 | for ext in pathexts: |
|
496 | for ext in pathexts: | |
502 | executable = pathcommand + ext |
|
497 | executable = pathcommand + ext | |
503 | if os.path.exists(executable): |
|
498 | if os.path.exists(executable): | |
504 | return executable |
|
499 | return executable | |
505 | return None |
|
500 | return None | |
506 |
|
501 | |||
507 | if pycompat.ossep in command: |
|
502 | if pycompat.ossep in command: | |
508 | return findexisting(command) |
|
503 | return findexisting(command) | |
509 |
|
504 | |||
510 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
505 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): | |
511 | executable = findexisting(os.path.join(path, command)) |
|
506 | executable = findexisting(os.path.join(path, command)) | |
512 | if executable is not None: |
|
507 | if executable is not None: | |
513 | return executable |
|
508 | return executable | |
514 | return findexisting(os.path.expanduser(os.path.expandvars(command))) |
|
509 | return findexisting(os.path.expanduser(os.path.expandvars(command))) | |
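A hedged usage sketch; the program names are examples only. When the command already carries an extension listed in PATHEXT it is looked up as-is, otherwise every PATHEXT suffix is tried in each PATH directory.

# Sketch only (Windows), assuming mercurial.windows is importable.
from mercurial import windows

exe = windows.findexe(b'python')     # tries python.com, python.exe, ...
bat = windows.findexe(b'build.bat')  # extension given: no suffixes appended
if exe is None or bat is None:
    print('lookup failed for one of the commands')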
515 |
|
510 | |||
516 |
|
511 | |||
517 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
512 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | |
518 |
|
513 | |||
519 |
|
514 | |||
520 | def statfiles(files): |
|
515 | def statfiles(files): | |
521 | '''Stat each file in files. Yield each stat, or None if a file |
|
516 | '''Stat each file in files. Yield each stat, or None if a file | |
522 | does not exist or has a type we don't care about. |
|
517 | does not exist or has a type we don't care about. | |
523 |
|
518 | |||
524 | Cluster and cache stat per directory to minimize number of OS stat calls.''' |
|
519 | Cluster and cache stat per directory to minimize number of OS stat calls.''' | |
525 | dircache = {} # dirname -> filename -> status | None if file does not exist |
|
520 | dircache = {} # dirname -> filename -> status | None if file does not exist | |
526 | getkind = stat.S_IFMT |
|
521 | getkind = stat.S_IFMT | |
527 | for nf in files: |
|
522 | for nf in files: | |
528 | nf = normcase(nf) |
|
523 | nf = normcase(nf) | |
529 | dir, base = os.path.split(nf) |
|
524 | dir, base = os.path.split(nf) | |
530 | if not dir: |
|
525 | if not dir: | |
531 | dir = b'.' |
|
526 | dir = b'.' | |
532 | cache = dircache.get(dir, None) |
|
527 | cache = dircache.get(dir, None) | |
533 | if cache is None: |
|
528 | if cache is None: | |
534 | try: |
|
529 | try: | |
535 | dmap = { |
|
530 | dmap = { | |
536 | normcase(n): s |
|
531 | normcase(n): s | |
537 | for n, k, s in listdir(dir, True) |
|
532 | for n, k, s in listdir(dir, True) | |
538 | if getkind(s.st_mode) in _wantedkinds |
|
533 | if getkind(s.st_mode) in _wantedkinds | |
539 | } |
|
534 | } | |
540 | except OSError as err: |
|
535 | except OSError as err: | |
541 | # Python >= 2.5 returns ENOENT and adds winerror field |
|
536 | # Python >= 2.5 returns ENOENT and adds winerror field | |
542 | # EINVAL is raised if dir is not a directory. |
|
537 | # EINVAL is raised if dir is not a directory. | |
543 | if err.errno not in (errno.ENOENT, errno.EINVAL, errno.ENOTDIR): |
|
538 | if err.errno not in (errno.ENOENT, errno.EINVAL, errno.ENOTDIR): | |
544 | raise |
|
539 | raise | |
545 | dmap = {} |
|
540 | dmap = {} | |
546 | cache = dircache.setdefault(dir, dmap) |
|
541 | cache = dircache.setdefault(dir, dmap) | |
547 | yield cache.get(base, None) |
|
542 | yield cache.get(base, None) | |
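A small usage sketch of the generator above; the file names are placeholders. Results arrive in input order, with None for names that are missing or are not regular files or symlinks.

# Sketch only, assuming mercurial.windows is importable; names are made up.
from mercurial import windows

names = [b'setup.py', b'README.rst', b'no/such/file']
for name, st in zip(names, windows.statfiles(names)):
    print(name, 'missing' if st is None else 'present')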
548 |
|
543 | |||
549 |
|
544 | |||
550 | def username(uid=None): |
|
545 | def username(uid=None): | |
551 | """Return the name of the user with the given uid. |
|
546 | """Return the name of the user with the given uid. | |
552 |
|
547 | |||
553 | If uid is None, return the name of the current user.""" |
|
548 | If uid is None, return the name of the current user.""" | |
554 | if not uid: |
|
549 | if not uid: | |
555 | return pycompat.fsencode(getpass.getuser()) |
|
550 | return pycompat.fsencode(getpass.getuser()) | |
556 | return None |
|
551 | return None | |
557 |
|
552 | |||
558 |
|
553 | |||
559 | def groupname(gid=None): |
|
554 | def groupname(gid=None): | |
560 | """Return the name of the group with the given gid. |
|
555 | """Return the name of the group with the given gid. | |
561 |
|
556 | |||
562 | If gid is None, return the name of the current group.""" |
|
557 | If gid is None, return the name of the current group.""" | |
563 | return None |
|
558 | return None | |
564 |
|
559 | |||
565 |
|
560 | |||
566 | def readlink(pathname): |
|
561 | def readlink(pathname): | |
567 | return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname))) |
|
562 | return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname))) | |
568 |
|
563 | |||
569 |
|
564 | |||
570 | def removedirs(name): |
|
565 | def removedirs(name): | |
571 | """special version of os.removedirs that does not remove symlinked |
|
566 | """special version of os.removedirs that does not remove symlinked | |
572 | directories or junction points if they actually contain files""" |
|
567 | directories or junction points if they actually contain files""" | |
573 | if listdir(name): |
|
568 | if listdir(name): | |
574 | return |
|
569 | return | |
575 | os.rmdir(name) |
|
570 | os.rmdir(name) | |
576 | head, tail = os.path.split(name) |
|
571 | head, tail = os.path.split(name) | |
577 | if not tail: |
|
572 | if not tail: | |
578 | head, tail = os.path.split(head) |
|
573 | head, tail = os.path.split(head) | |
579 | while head and tail: |
|
574 | while head and tail: | |
580 | try: |
|
575 | try: | |
581 | if listdir(head): |
|
576 | if listdir(head): | |
582 | return |
|
577 | return | |
583 | os.rmdir(head) |
|
578 | os.rmdir(head) | |
584 | except (ValueError, OSError): |
|
579 | except (ValueError, OSError): | |
585 | break |
|
580 | break | |
586 | head, tail = os.path.split(head) |
|
581 | head, tail = os.path.split(head) | |
587 |
|
582 | |||
588 |
|
583 | |||
589 | def rename(src, dst): |
|
584 | def rename(src, dst): | |
590 | '''atomically rename file src to dst, replacing dst if it exists''' |
|
585 | '''atomically rename file src to dst, replacing dst if it exists''' | |
591 | try: |
|
586 | try: | |
592 | os.rename(src, dst) |
|
587 | os.rename(src, dst) | |
593 | except OSError as e: |
|
588 | except OSError as e: | |
594 | if e.errno != errno.EEXIST: |
|
589 | if e.errno != errno.EEXIST: | |
595 | raise |
|
590 | raise | |
596 | unlink(dst) |
|
591 | unlink(dst) | |
597 | os.rename(src, dst) |
|
592 | os.rename(src, dst) | |
598 |
|
593 | |||
599 |
|
594 | |||
600 | def gethgcmd(): |
|
595 | def gethgcmd(): | |
601 | return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]] |
|
596 | return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]] | |
602 |
|
597 | |||
603 |
|
598 | |||
604 | def groupmembers(name): |
|
599 | def groupmembers(name): | |
605 | # Don't support groups on Windows for now |
|
600 | # Don't support groups on Windows for now | |
606 | raise KeyError |
|
601 | raise KeyError | |
607 |
|
602 | |||
608 |
|
603 | |||
609 | def isexec(f): |
|
604 | def isexec(f): | |
610 | return False |
|
605 | return False | |
611 |
|
606 | |||
612 |
|
607 | |||
613 | class cachestat(object): |
|
608 | class cachestat(object): | |
614 | def __init__(self, path): |
|
609 | def __init__(self, path): | |
615 | pass |
|
610 | pass | |
616 |
|
611 | |||
617 | def cacheable(self): |
|
612 | def cacheable(self): | |
618 | return False |
|
613 | return False | |
619 |
|
614 | |||
620 |
|
615 | |||
def lookupreg(key, valname=None, scope=None):
    ''' Look up a key/value name in the Windows registry.

    valname: value name. If unspecified, the default value for the key
    is used.
    scope: optionally specify scope for registry lookup, this can be
    a sequence of scopes to look up in order. Default (CURRENT_USER,
    LOCAL_MACHINE).
    '''
    if scope is None:
        scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE)
    elif not isinstance(scope, (list, tuple)):
        scope = (scope,)
    for s in scope:
        try:
            with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey:
                name = valname and encoding.strfromlocal(valname) or valname
                val = winreg.QueryValueEx(hkey, name)[0]
                # never let a Unicode string escape into the wild
                return encoding.unitolocal(val)
        except EnvironmentError:
            pass


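# Illustrative sketch, not part of the file under review: a registry lookup
# restricted to HKEY_LOCAL_MACHINE. The key/value names are examples only;
# lookupreg() returns the value as local-encoding bytes, or falls through and
# returns None when nothing matches in any searched scope.
def _lookupreg_example():
    return lookupreg(
        b'SOFTWARE\\Microsoft\\Windows\\CurrentVersion',
        valname=b'ProgramFilesDir',
        scope=winreg.HKEY_LOCAL_MACHINE,
    )

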
expandglobs = True


def statislink(st):
    '''check whether a stat result is a symlink'''
    return False


def statisexec(st):
    '''check whether a stat result is an executable file'''
    return False


def poll(fds):
    # see posix.py for description
    raise NotImplementedError()


def readpipe(pipe):
    """Read all available data from a pipe."""
    chunks = []
    while True:
        size = win32.peekpipe(pipe)
        if not size:
            break

        s = pipe.read(size)
        if not s:
            break
        chunks.append(s)

    return b''.join(chunks)


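# Illustrative sketch, not part of the file under review: drain whatever a
# child process has already written to one of its pipes without blocking.
# 'proc' is assumed to be a subprocess.Popen created with stderr=PIPE;
# readpipe() uses win32.peekpipe() to learn how many bytes are ready.
def _readpipe_example(proc):
    return readpipe(proc.stderr)

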
def bindunixsocket(sock, path):
    raise NotImplementedError('unsupported platform')