@@ -1,47 +1,47 @@
|
1 | 1 | #!/usr/bin/env python |
|
2 | 2 | # |
|
3 | 3 | # Dumps output generated by Mercurial's command server in a formatted style to a |
|
4 | 4 | # given file or stderr if '-' is specified. Output is also written in its raw |
|
5 | 5 | # format to stdout. |
|
6 | 6 | # |
|
7 | 7 | # $ ./hg serve --cmds pipe | ./contrib/debugcmdserver.py - |
|
8 | 8 | # o, 52 -> 'capabilities: getencoding runcommand\nencoding: UTF-8' |
|
9 | 9 | |
|
10 | 10 | import sys, struct |
|
11 | 11 | |
|
12 | 12 | if len(sys.argv) != 2: |
|
13 | 13 | print 'usage: debugcmdserver.py FILE' |
|
14 | 14 | sys.exit(1) |
|
15 | 15 | |
|
16 | 16 | outputfmt = '>cI' |
|
17 | 17 | outputfmtsize = struct.calcsize(outputfmt) |
|
18 | 18 | |
|
19 | 19 | if sys.argv[1] == '-': |
|
20 | 20 | log = sys.stderr |
|
21 | 21 | else: |
|
22 | 22 | log = open(sys.argv[1], 'a') |
|
23 | 23 | |
|
24 | 24 | def read(size): |
|
25 | 25 | data = sys.stdin.read(size) |
|
26 | 26 | if not data: |
|
27 | | raise EOFError |
|
| 27 | raise EOFError |
|
28 | 28 | sys.stdout.write(data) |
|
29 | 29 | sys.stdout.flush() |
|
30 | 30 | return data |
|
31 | 31 | |
|
32 | 32 | try: |
|
33 | 33 | while True: |
|
34 | 34 | header = read(outputfmtsize) |
|
35 | 35 | channel, length = struct.unpack(outputfmt, header) |
|
36 | 36 | log.write('%s, %-4d' % (channel, length)) |
|
37 | 37 | if channel in 'IL': |
|
38 | 38 | log.write(' -> waiting for input\n') |
|
39 | 39 | else: |
|
40 | 40 | data = read(length) |
|
41 | 41 | log.write(' -> %r\n' % data) |
|
42 | 42 | log.flush() |
|
43 | 43 | except EOFError: |
|
44 | 44 | pass |
|
45 | 45 | finally: |
|
46 | 46 | if log != sys.stderr: |
|
47 | 47 | log.close() |
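
Note on the framing decoded above: every block from the command server is a five-byte header, one channel byte plus a big-endian unsigned 32-bit length (the '>cI' struct format), followed by that many payload bytes; on the input channels 'I' and 'L' the length is a request for input rather than a payload size, which is why the script logs "waiting for input" instead of reading. A minimal standalone sketch of one frame (Python 2, matching the source; the payload is the capabilities banner from the header comment):

    import struct

    outputfmt = '>cI'                      # channel byte + big-endian uint32
    payload = ('capabilities: getencoding runcommand\n'
               'encoding: UTF-8')
    frame = struct.pack(outputfmt, 'o', len(payload)) + payload

    # Decoding, as the read loop above does:
    hsize = struct.calcsize(outputfmt)
    channel, length = struct.unpack(outputfmt, frame[:hsize])
    assert (channel, length) == ('o', 52)  # matches the sample output line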
@@ -1,445 +1,445 @@
|
1 | 1 | # common.py - common code for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import base64, errno |
|
9 | 9 | import os |
|
10 | 10 | import cPickle as pickle |
|
11 | 11 | from mercurial import util |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | |
|
14 | 14 | propertycache = util.propertycache |
|
15 | 15 | |
|
16 | 16 | def encodeargs(args): |
|
17 | 17 | def encodearg(s): |
|
18 | 18 | lines = base64.encodestring(s) |
|
19 | 19 | lines = [l.splitlines()[0] for l in lines] |
|
20 | 20 | return ''.join(lines) |
|
21 | 21 | |
|
22 | 22 | s = pickle.dumps(args) |
|
23 | 23 | return encodearg(s) |
|
24 | 24 | |
|
25 | 25 | def decodeargs(s): |
|
26 | 26 | s = base64.decodestring(s) |
|
27 | 27 | return pickle.loads(s) |
|
28 | 28 | |
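
encodeargs()/decodeargs() above exist so an argument list can cross a process boundary as a single word: pickle it, base64 it, and strip the newlines that base64.encodestring() inserts every 76 characters. A quick roundtrip check (a sketch assuming both helpers are imported from this module):

    args = ['rev', 42, {'branch': 'default'}]
    encoded = encodeargs(args)
    assert '\n' not in encoded            # safe to pass on a command line
    assert decodeargs(encoded) == args    # lossless roundtrip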
|
29 | 29 | class MissingTool(Exception): |
|
30 | 30 | pass |
|
31 | 31 | |
|
32 | 32 | def checktool(exe, name=None, abort=True): |
|
33 | 33 | name = name or exe |
|
34 | 34 | if not util.findexe(exe): |
|
35 | 35 | exc = abort and util.Abort or MissingTool |
|
36 | 36 | raise exc(_('cannot find required "%s" tool') % name) |
|
37 | 37 | |
|
38 | 38 | class NoRepo(Exception): |
|
39 | 39 | pass |
|
40 | 40 | |
|
41 | 41 | SKIPREV = 'SKIP' |
|
42 | 42 | |
|
43 | 43 | class commit(object): |
|
44 | 44 | def __init__(self, author, date, desc, parents, branch=None, rev=None, |
|
45 | 45 | extra={}, sortkey=None): |
|
46 | 46 | self.author = author or 'unknown' |
|
47 | 47 | self.date = date or '0 0' |
|
48 | 48 | self.desc = desc |
|
49 | 49 | self.parents = parents |
|
50 | 50 | self.branch = branch |
|
51 | 51 | self.rev = rev |
|
52 | 52 | self.extra = extra |
|
53 | 53 | self.sortkey = sortkey |
|
54 | 54 | |
|
55 | 55 | class converter_source(object): |
|
56 | 56 | """Conversion source interface""" |
|
57 | 57 | |
|
58 | 58 | def __init__(self, ui, path=None, rev=None): |
|
59 | 59 | """Initialize conversion source (or raise NoRepo("message") |
|
60 | 60 | exception if path is not a valid repository)""" |
|
61 | 61 | self.ui = ui |
|
62 | 62 | self.path = path |
|
63 | 63 | self.rev = rev |
|
64 | 64 | |
|
65 | 65 | self.encoding = 'utf-8' |
|
66 | 66 | |
|
67 | 67 | def before(self): |
|
68 | 68 | pass |
|
69 | 69 | |
|
70 | 70 | def after(self): |
|
71 | 71 | pass |
|
72 | 72 | |
|
73 | 73 | def setrevmap(self, revmap): |
|
74 | 74 | """set the map of already-converted revisions""" |
|
75 | 75 | pass |
|
76 | 76 | |
|
77 | 77 | def getheads(self): |
|
78 | 78 | """Return a list of this repository's heads""" |
|
79 | | raise NotImplementedError |
|
| 79 | raise NotImplementedError |
|
80 | 80 | |
|
81 | 81 | def getfile(self, name, rev): |
|
82 | 82 | """Return a pair (data, mode) where data is the file content |
|
83 | 83 | as a string and mode one of '', 'x' or 'l'. rev is the |
|
84 | 84 | identifier returned by a previous call to getchanges(). Raise |
|
85 | 85 | IOError to indicate that name was deleted in rev. |
|
86 | 86 | """ |
|
87 | | raise NotImplementedError |
|
| 87 | raise NotImplementedError |
|
88 | 88 | |
|
89 | 89 | def getchanges(self, version): |
|
90 | 90 | """Returns a tuple of (files, copies). |
|
91 | 91 | |
|
92 | 92 | files is a sorted list of (filename, id) tuples for all files |
|
93 | 93 | changed between version and its first parent returned by |
|
94 | 94 | getcommit(). id is the source revision id of the file. |
|
95 | 95 | |
|
96 | 96 | copies is a dictionary of dest: source |
|
97 | 97 | """ |
|
98 | | raise NotImplementedError |
|
| 98 | raise NotImplementedError |
|
99 | 99 | |
|
100 | 100 | def getcommit(self, version): |
|
101 | 101 | """Return the commit object for version""" |
|
102 | | raise NotImplementedError |
|
| 102 | raise NotImplementedError |
|
103 | 103 | |
|
104 | 104 | def gettags(self): |
|
105 | 105 | """Return the tags as a dictionary of name: revision |
|
106 | 106 | |
|
107 | 107 | Tag names must be UTF-8 strings. |
|
108 | 108 | """ |
|
109 | | raise NotImplementedError |
|
| 109 | raise NotImplementedError |
|
110 | 110 | |
|
111 | 111 | def recode(self, s, encoding=None): |
|
112 | 112 | if not encoding: |
|
113 | 113 | encoding = self.encoding or 'utf-8' |
|
114 | 114 | |
|
115 | 115 | if isinstance(s, unicode): |
|
116 | 116 | return s.encode("utf-8") |
|
117 | 117 | try: |
|
118 | 118 | return s.decode(encoding).encode("utf-8") |
|
119 | 119 | except: |
|
120 | 120 | try: |
|
121 | 121 | return s.decode("latin-1").encode("utf-8") |
|
122 | 122 | except: |
|
123 | 123 | return s.decode(encoding, "replace").encode("utf-8") |
|
124 | 124 | |
|
125 | 125 | def getchangedfiles(self, rev, i): |
|
126 | 126 | """Return the files changed by rev compared to parent[i]. |
|
127 | 127 | |
|
128 | 128 | i is an index selecting one of the parents of rev. The return |
|
129 | 129 | value should be the list of files that are different in rev and |
|
130 | 130 | this parent. |
|
131 | 131 | |
|
132 | 132 | If rev has no parents, i is None. |
|
133 | 133 | |
|
134 | 134 | This function is only needed to support --filemap |
|
135 | 135 | """ |
|
136 | | raise NotImplementedError |
|
| 136 | raise NotImplementedError |
|
137 | 137 | |
|
138 | 138 | def converted(self, rev, sinkrev): |
|
139 | 139 | '''Notify the source that a revision has been converted.''' |
|
140 | 140 | pass |
|
141 | 141 | |
|
142 | 142 | def hasnativeorder(self): |
|
143 | 143 | """Return true if this source has a meaningful, native revision |
|
144 | 144 | order. For instance, Mercurial revisions are store sequentially |
|
145 | 145 | while there is no such global ordering with Darcs. |
|
146 | 146 | """ |
|
147 | 147 | return False |
|
148 | 148 | |
|
149 | 149 | def lookuprev(self, rev): |
|
150 | 150 | """If rev is a meaningful revision reference in source, return |
|
151 | 151 | the referenced identifier in the same format used by getcommit(). |
|
152 | 152 | return None otherwise. |
|
153 | 153 | """ |
|
154 | 154 | return None |
|
155 | 155 | |
|
156 | 156 | def getbookmarks(self): |
|
157 | 157 | """Return the bookmarks as a dictionary of name: revision |
|
158 | 158 | |
|
159 | 159 | Bookmark names are to be UTF-8 strings. |
|
160 | 160 | """ |
|
161 | 161 | return {} |
|
162 | 162 | |
|
163 | 163 | class converter_sink(object): |
|
164 | 164 | """Conversion sink (target) interface""" |
|
165 | 165 | |
|
166 | 166 | def __init__(self, ui, path): |
|
167 | 167 | """Initialize conversion sink (or raise NoRepo("message") |
|
168 | 168 | exception if path is not a valid repository) |
|
169 | 169 | |
|
170 | 170 | created is a list of paths to remove if a fatal error occurs |
|
171 | 171 | later""" |
|
172 | 172 | self.ui = ui |
|
173 | 173 | self.path = path |
|
174 | 174 | self.created = [] |
|
175 | 175 | |
|
176 | 176 | def getheads(self): |
|
177 | 177 | """Return a list of this repository's heads""" |
|
178 | | raise NotImplementedError |
|
| 178 | raise NotImplementedError |
|
179 | 179 | |
|
180 | 180 | def revmapfile(self): |
|
181 | 181 | """Path to a file that will contain lines |
|
182 | 182 | source_rev_id sink_rev_id |
|
183 | 183 | mapping equivalent revision identifiers for each system.""" |
|
184 | | raise NotImplementedError |
|
| 184 | raise NotImplementedError |
|
185 | 185 | |
|
186 | 186 | def authorfile(self): |
|
187 | 187 | """Path to a file that will contain lines |
|
188 | 188 | srcauthor=dstauthor |
|
189 | 189 | mapping equivalent authors identifiers for each system.""" |
|
190 | 190 | return None |
|
191 | 191 | |
|
192 | 192 | def putcommit(self, files, copies, parents, commit, source, revmap): |
|
193 | 193 | """Create a revision with all changed files listed in 'files' |
|
194 | 194 | and having listed parents. 'commit' is a commit object |
|
195 | 195 | containing at a minimum the author, date, and message for this |
|
196 | 196 | changeset. 'files' is a list of (path, version) tuples, |
|
197 | 197 | 'copies' is a dictionary mapping destinations to sources, |
|
198 | 198 | 'source' is the source repository, and 'revmap' is a mapfile |
|
199 | 199 | of source revisions to converted revisions. Only getfile() and |
|
200 | 200 | lookuprev() should be called on 'source'. |
|
201 | 201 | |
|
202 | 202 | Note that the sink repository is not told to update itself to |
|
203 | 203 | a particular revision (or even what that revision would be) |
|
204 | 204 | before it receives the file data. |
|
205 | 205 | """ |
|
206 | | raise NotImplementedError |
|
| 206 | raise NotImplementedError |
|
207 | 207 | |
|
208 | 208 | def puttags(self, tags): |
|
209 | 209 | """Put tags into sink. |
|
210 | 210 | |
|
211 | 211 | tags: {tagname: sink_rev_id, ...} where tagname is an UTF-8 string. |
|
212 | 212 | Return a pair (tag_revision, tag_parent_revision), or (None, None) |
|
213 | 213 | if nothing was changed. |
|
214 | 214 | """ |
|
215 | | raise NotImplementedError |
|
| 215 | raise NotImplementedError |
|
216 | 216 | |
|
217 | 217 | def setbranch(self, branch, pbranches): |
|
218 | 218 | """Set the current branch name. Called before the first putcommit |
|
219 | 219 | on the branch. |
|
220 | 220 | branch: branch name for subsequent commits |
|
221 | 221 | pbranches: (converted parent revision, parent branch) tuples""" |
|
222 | 222 | pass |
|
223 | 223 | |
|
224 | 224 | def setfilemapmode(self, active): |
|
225 | 225 | """Tell the destination that we're using a filemap |
|
226 | 226 | |
|
227 | 227 | Some converter_sources (svn in particular) can claim that a file |
|
228 | 228 | was changed in a revision, even if there was no change. This method |
|
229 | 229 | tells the destination that we're using a filemap and that it should |
|
230 | 230 | filter empty revisions. |
|
231 | 231 | """ |
|
232 | 232 | pass |
|
233 | 233 | |
|
234 | 234 | def before(self): |
|
235 | 235 | pass |
|
236 | 236 | |
|
237 | 237 | def after(self): |
|
238 | 238 | pass |
|
239 | 239 | |
|
240 | 240 | def putbookmarks(self, bookmarks): |
|
241 | 241 | """Put bookmarks into sink. |
|
242 | 242 | |
|
243 | 243 | bookmarks: {bookmarkname: sink_rev_id, ...} |
|
244 | 244 | where bookmarkname is an UTF-8 string. |
|
245 | 245 | """ |
|
246 | 246 | pass |
|
247 | 247 | |
|
248 | 248 | def hascommit(self, rev): |
|
249 | 249 | """Return True if the sink contains rev""" |
|
250 | | raise NotImplementedError |
|
| 250 | raise NotImplementedError |
|
251 | 251 | |
|
252 | 252 | class commandline(object): |
|
253 | 253 | def __init__(self, ui, command): |
|
254 | 254 | self.ui = ui |
|
255 | 255 | self.command = command |
|
256 | 256 | |
|
257 | 257 | def prerun(self): |
|
258 | 258 | pass |
|
259 | 259 | |
|
260 | 260 | def postrun(self): |
|
261 | 261 | pass |
|
262 | 262 | |
|
263 | 263 | def _cmdline(self, cmd, closestdin, *args, **kwargs): |
|
264 | 264 | cmdline = [self.command, cmd] + list(args) |
|
265 | 265 | for k, v in kwargs.iteritems(): |
|
266 | 266 | if len(k) == 1: |
|
267 | 267 | cmdline.append('-' + k) |
|
268 | 268 | else: |
|
269 | 269 | cmdline.append('--' + k.replace('_', '-')) |
|
270 | 270 | try: |
|
271 | 271 | if len(k) == 1: |
|
272 | 272 | cmdline.append('' + v) |
|
273 | 273 | else: |
|
274 | 274 | cmdline[-1] += '=' + v |
|
275 | 275 | except TypeError: |
|
276 | 276 | pass |
|
277 | 277 | cmdline = [util.shellquote(arg) for arg in cmdline] |
|
278 | 278 | if not self.ui.debugflag: |
|
279 | 279 | cmdline += ['2>', util.nulldev] |
|
280 | 280 | if closestdin: |
|
281 | 281 | cmdline += ['<', util.nulldev] |
|
282 | 282 | cmdline = ' '.join(cmdline) |
|
283 | 283 | return cmdline |
|
284 | 284 | |
|
285 | 285 | def _run(self, cmd, *args, **kwargs): |
|
286 | 286 | return self._dorun(util.popen, cmd, True, *args, **kwargs) |
|
287 | 287 | |
|
288 | 288 | def _run2(self, cmd, *args, **kwargs): |
|
289 | 289 | return self._dorun(util.popen2, cmd, False, *args, **kwargs) |
|
290 | 290 | |
|
291 | 291 | def _dorun(self, openfunc, cmd, closestdin, *args, **kwargs): |
|
292 | 292 | cmdline = self._cmdline(cmd, closestdin, *args, **kwargs) |
|
293 | 293 | self.ui.debug('running: %s\n' % (cmdline,)) |
|
294 | 294 | self.prerun() |
|
295 | 295 | try: |
|
296 | 296 | return openfunc(cmdline) |
|
297 | 297 | finally: |
|
298 | 298 | self.postrun() |
|
299 | 299 | |
|
300 | 300 | def run(self, cmd, *args, **kwargs): |
|
301 | 301 | fp = self._run(cmd, *args, **kwargs) |
|
302 | 302 | output = fp.read() |
|
303 | 303 | self.ui.debug(output) |
|
304 | 304 | return output, fp.close() |
|
305 | 305 | |
|
306 | 306 | def runlines(self, cmd, *args, **kwargs): |
|
307 | 307 | fp = self._run(cmd, *args, **kwargs) |
|
308 | 308 | output = fp.readlines() |
|
309 | 309 | self.ui.debug(''.join(output)) |
|
310 | 310 | return output, fp.close() |
|
311 | 311 | |
|
312 | 312 | def checkexit(self, status, output=''): |
|
313 | 313 | if status: |
|
314 | 314 | if output: |
|
315 | 315 | self.ui.warn(_('%s error:\n') % self.command) |
|
316 | 316 | self.ui.warn(output) |
|
317 | 317 | msg = util.explainexit(status)[0] |
|
318 | 318 | raise util.Abort('%s %s' % (self.command, msg)) |
|
319 | 319 | |
|
320 | 320 | def run0(self, cmd, *args, **kwargs): |
|
321 | 321 | output, status = self.run(cmd, *args, **kwargs) |
|
322 | 322 | self.checkexit(status, output) |
|
323 | 323 | return output |
|
324 | 324 | |
|
325 | 325 | def runlines0(self, cmd, *args, **kwargs): |
|
326 | 326 | output, status = self.runlines(cmd, *args, **kwargs) |
|
327 | 327 | self.checkexit(status, ''.join(output)) |
|
328 | 328 | return output |
|
329 | 329 | |
|
330 | 330 | @propertycache |
|
331 | 331 | def argmax(self): |
|
332 | 332 | # POSIX requires at least 4096 bytes for ARG_MAX |
|
333 | 333 | argmax = 4096 |
|
334 | 334 | try: |
|
335 | 335 | argmax = os.sysconf("SC_ARG_MAX") |
|
336 | 336 | except: |
|
337 | 337 | pass |
|
338 | 338 | |
|
339 | 339 | # Windows shells impose their own limits on command line length, |
|
340 | 340 | # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes |
|
341 | 341 | # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for |
|
342 | 342 | # details about cmd.exe limitations. |
|
343 | 343 | |
|
344 | 344 | # Since ARG_MAX is for command line _and_ environment, lower our limit |
|
345 | 345 | # (and make happy Windows shells while doing this). |
|
346 | 346 | return argmax // 2 - 1 |
|
347 | 347 | |
|
348 | 348 | def limit_arglist(self, arglist, cmd, closestdin, *args, **kwargs): |
|
349 | 349 | cmdlen = len(self._cmdline(cmd, closestdin, *args, **kwargs)) |
|
350 | 350 | limit = self.argmax - cmdlen |
|
351 | 351 | bytes = 0 |
|
352 | 352 | fl = [] |
|
353 | 353 | for fn in arglist: |
|
354 | 354 | b = len(fn) + 3 |
|
355 | 355 | if bytes + b < limit or len(fl) == 0: |
|
356 | 356 | fl.append(fn) |
|
357 | 357 | bytes += b |
|
358 | 358 | else: |
|
359 | 359 | yield fl |
|
360 | 360 | fl = [fn] |
|
361 | 361 | bytes = b |
|
362 | 362 | if fl: |
|
363 | 363 | yield fl |
|
364 | 364 | |
|
365 | 365 | def xargs(self, arglist, cmd, *args, **kwargs): |
|
366 | 366 | for l in self.limit_arglist(arglist, cmd, True, *args, **kwargs): |
|
367 | 367 | self.run0(cmd, *(list(args) + l), **kwargs) |
|
368 | 368 | |
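
limit_arglist() above is an in-process xargs: it greedily packs names into chunks whose estimated length stays below argmax minus the fixed part of the command line, while guaranteeing every chunk carries at least one name; the "+ 3" per name is a rough allowance for the separating space and shell quoting. The same packing rule in isolation (toy limit for illustration):

    def chunks(items, limit):
        size, chunk = 0, []
        for it in items:
            cost = len(it) + 3            # separator plus quoting overhead
            if size + cost < limit or not chunk:
                chunk.append(it)
                size += cost
            else:
                yield chunk
                chunk, size = [it], cost
    	if chunk:
            yield chunk

    assert list(chunks(['aa', 'bb', 'cc'], 6)) == [['aa'], ['bb'], ['cc']]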
|
369 | 369 | class mapfile(dict): |
|
370 | 370 | def __init__(self, ui, path): |
|
371 | 371 | super(mapfile, self).__init__() |
|
372 | 372 | self.ui = ui |
|
373 | 373 | self.path = path |
|
374 | 374 | self.fp = None |
|
375 | 375 | self.order = [] |
|
376 | 376 | self._read() |
|
377 | 377 | |
|
378 | 378 | def _read(self): |
|
379 | 379 | if not self.path: |
|
380 | 380 | return |
|
381 | 381 | try: |
|
382 | 382 | fp = open(self.path, 'r') |
|
383 | 383 | except IOError, err: |
|
384 | 384 | if err.errno != errno.ENOENT: |
|
385 | 385 | raise |
|
386 | 386 | return |
|
387 | 387 | for i, line in enumerate(fp): |
|
388 | 388 | line = line.splitlines()[0].rstrip() |
|
389 | 389 | if not line: |
|
390 | 390 | # Ignore blank lines |
|
391 | 391 | continue |
|
392 | 392 | try: |
|
393 | 393 | key, value = line.rsplit(' ', 1) |
|
394 | 394 | except ValueError: |
|
395 | 395 | raise util.Abort( |
|
396 | 396 | _('syntax error in %s(%d): key/value pair expected') |
|
397 | 397 | % (self.path, i + 1)) |
|
398 | 398 | if key not in self: |
|
399 | 399 | self.order.append(key) |
|
400 | 400 | super(mapfile, self).__setitem__(key, value) |
|
401 | 401 | fp.close() |
|
402 | 402 | |
|
403 | 403 | def __setitem__(self, key, value): |
|
404 | 404 | if self.fp is None: |
|
405 | 405 | try: |
|
406 | 406 | self.fp = open(self.path, 'a') |
|
407 | 407 | except IOError, err: |
|
408 | 408 | raise util.Abort(_('could not open map file %r: %s') % |
|
409 | 409 | (self.path, err.strerror)) |
|
410 | 410 | self.fp.write('%s %s\n' % (key, value)) |
|
411 | 411 | self.fp.flush() |
|
412 | 412 | super(mapfile, self).__setitem__(key, value) |
|
413 | 413 | |
|
414 | 414 | def close(self): |
|
415 | 415 | if self.fp: |
|
416 | 416 | self.fp.close() |
|
417 | 417 | self.fp = None |
|
418 | 418 | |
|
419 | 419 | def parsesplicemap(path): |
|
420 | 420 | """Parse a splicemap, return a child/parents dictionary.""" |
|
421 | 421 | if not path: |
|
422 | 422 | return {} |
|
423 | 423 | m = {} |
|
424 | 424 | try: |
|
425 | 425 | fp = open(path, 'r') |
|
426 | 426 | for i, line in enumerate(fp): |
|
427 | 427 | line = line.splitlines()[0].rstrip() |
|
428 | 428 | if not line: |
|
429 | 429 | # Ignore blank lines |
|
430 | 430 | continue |
|
431 | 431 | try: |
|
432 | 432 | child, parents = line.split(' ', 1) |
|
433 | 433 | parents = parents.replace(',', ' ').split() |
|
434 | 434 | except ValueError: |
|
435 | 435 | raise util.Abort(_('syntax error in %s(%d): child parent1' |
|
436 | 436 | '[,parent2] expected') % (path, i + 1)) |
|
437 | 437 | pp = [] |
|
438 | 438 | for p in parents: |
|
439 | 439 | if p not in pp: |
|
440 | 440 | pp.append(p) |
|
441 | 441 | m[child] = pp |
|
442 | 442 | except IOError, e: |
|
443 | 443 | if e.errno != errno.ENOENT: |
|
444 | 444 | raise |
|
445 | 445 | return m |
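
For reference, a splicemap is a plain text file of "child parent1[,parent2]" lines: parents may be separated by commas or whitespace, blank lines are skipped, and duplicate parents are dropped while order is preserved. With hypothetical revision ids, the two lines

    c3a0d5bb3d45 4d0243f0a5b9
    deadbeef0001 4d0243f0a5b9,feedface0002

parse to:

    {'c3a0d5bb3d45': ['4d0243f0a5b9'],
     'deadbeef0001': ['4d0243f0a5b9', 'feedface0002']}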
@@ -1,217 +1,217 @@
|
1 | 1 | # git.py - git support for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import os |
|
9 | 9 | from mercurial import util |
|
10 | 10 | from mercurial.node import hex, nullid |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | |
|
13 | 13 | from common import NoRepo, commit, converter_source, checktool |
|
14 | 14 | |
|
15 | 15 | class convert_git(converter_source): |
|
16 | 16 | # Windows does not support GIT_DIR= construct while other systems |
|
17 | 17 | # cannot remove environment variable. Just assume none have |
|
18 | 18 | # both issues. |
|
19 | 19 | if util.safehasattr(os, 'unsetenv'): |
|
20 | 20 | def gitopen(self, s, noerr=False): |
|
21 | 21 | prevgitdir = os.environ.get('GIT_DIR') |
|
22 | 22 | os.environ['GIT_DIR'] = self.path |
|
23 | 23 | try: |
|
24 | 24 | if noerr: |
|
25 | 25 | (stdin, stdout, stderr) = util.popen3(s) |
|
26 | 26 | return stdout |
|
27 | 27 | else: |
|
28 | 28 | return util.popen(s, 'rb') |
|
29 | 29 | finally: |
|
30 | 30 | if prevgitdir is None: |
|
31 | 31 | del os.environ['GIT_DIR'] |
|
32 | 32 | else: |
|
33 | 33 | os.environ['GIT_DIR'] = prevgitdir |
|
34 | 34 | else: |
|
35 | 35 | def gitopen(self, s, noerr=False): |
|
36 | 36 | if noerr: |
|
37 | 37 | (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s)) |
|
38 | 38 | return so |
|
39 | 39 | else: |
|
40 | 40 | return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb') |
|
41 | 41 | |
|
42 | 42 | def gitread(self, s): |
|
43 | 43 | fh = self.gitopen(s) |
|
44 | 44 | data = fh.read() |
|
45 | 45 | return data, fh.close() |
|
46 | 46 | |
|
47 | 47 | def __init__(self, ui, path, rev=None): |
|
48 | 48 | super(convert_git, self).__init__(ui, path, rev=rev) |
|
49 | 49 | |
|
50 | 50 | if os.path.isdir(path + "/.git"): |
|
51 | 51 | path += "/.git" |
|
52 | 52 | if not os.path.exists(path + "/objects"): |
|
53 | 53 | raise NoRepo(_("%s does not look like a Git repository") % path) |
|
54 | 54 | |
|
55 | 55 | checktool('git', 'git') |
|
56 | 56 | |
|
57 | 57 | self.path = path |
|
58 | 58 | |
|
59 | 59 | def getheads(self): |
|
60 | 60 | if not self.rev: |
|
61 | 61 | heads, ret = self.gitread('git rev-parse --branches --remotes') |
|
62 | 62 | heads = heads.splitlines() |
|
63 | 63 | else: |
|
64 | 64 | heads, ret = self.gitread("git rev-parse --verify %s" % self.rev) |
|
65 | 65 | heads = [heads[:-1]] |
|
66 | 66 | if ret: |
|
67 | 67 | raise util.Abort(_('cannot retrieve git heads')) |
|
68 | 68 | return heads |
|
69 | 69 | |
|
70 | 70 | def catfile(self, rev, type): |
|
71 | 71 | if rev == hex(nullid): |
|
72 | | raise IOError |
|
| 72 | raise IOError |
|
73 | 73 | data, ret = self.gitread("git cat-file %s %s" % (type, rev)) |
|
74 | 74 | if ret: |
|
75 | 75 | raise util.Abort(_('cannot read %r object at %s') % (type, rev)) |
|
76 | 76 | return data |
|
77 | 77 | |
|
78 | 78 | def getfile(self, name, rev): |
|
79 | 79 | data = self.catfile(rev, "blob") |
|
80 | 80 | mode = self.modecache[(name, rev)] |
|
81 | 81 | return data, mode |
|
82 | 82 | |
|
83 | 83 | def getchanges(self, version): |
|
84 | 84 | self.modecache = {} |
|
85 | 85 | fh = self.gitopen("git diff-tree -z --root -m -r %s" % version) |
|
86 | 86 | changes = [] |
|
87 | 87 | seen = set() |
|
88 | 88 | entry = None |
|
89 | 89 | for l in fh.read().split('\x00'): |
|
90 | 90 | if not entry: |
|
91 | 91 | if not l.startswith(':'): |
|
92 | 92 | continue |
|
93 | 93 | entry = l |
|
94 | 94 | continue |
|
95 | 95 | f = l |
|
96 | 96 | if f not in seen: |
|
97 | 97 | seen.add(f) |
|
98 | 98 | entry = entry.split() |
|
99 | 99 | h = entry[3] |
|
100 | 100 | if entry[1] == '160000': |
|
101 | 101 | raise util.Abort('git submodules are not supported!') |
|
102 | 102 | p = (entry[1] == "100755") |
|
103 | 103 | s = (entry[1] == "120000") |
|
104 | 104 | self.modecache[(f, h)] = (p and "x") or (s and "l") or "" |
|
105 | 105 | changes.append((f, h)) |
|
106 | 106 | entry = None |
|
107 | 107 | if fh.close(): |
|
108 | 108 | raise util.Abort(_('cannot read changes in %s') % version) |
|
109 | 109 | return (changes, {}) |
|
110 | 110 | |
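
The loop above walks "git diff-tree -z" output as alternating NUL-separated tokens: a metadata record starting with ':' followed by the path it applies to. After entry.split(), field 1 is the destination mode (100755 executable, 120000 symlink, 160000 submodule) and field 3 the destination blob id, which becomes the file's "rev". One hand-decoded token pair (hashes shortened and hypothetical):

    entry = ':100644 100755 6a69f92 b7e21dd M'    # metadata token
    f = 'contrib/example.py'                      # following token: the path
    fields = entry.split()
    h = fields[3]                                 # destination blob id
    mode = ((fields[1] == '100755' and 'x') or
            (fields[1] == '120000' and 'l') or '')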
|
111 | 111 | def getcommit(self, version): |
|
112 | 112 | c = self.catfile(version, "commit") # read the commit hash |
|
113 | 113 | end = c.find("\n\n") |
|
114 | 114 | message = c[end + 2:] |
|
115 | 115 | message = self.recode(message) |
|
116 | 116 | l = c[:end].splitlines() |
|
117 | 117 | parents = [] |
|
118 | 118 | author = committer = None |
|
119 | 119 | for e in l[1:]: |
|
120 | 120 | n, v = e.split(" ", 1) |
|
121 | 121 | if n == "author": |
|
122 | 122 | p = v.split() |
|
123 | 123 | tm, tz = p[-2:] |
|
124 | 124 | author = " ".join(p[:-2]) |
|
125 | 125 | if author[0] == "<": author = author[1:-1] |
|
126 | 126 | author = self.recode(author) |
|
127 | 127 | if n == "committer": |
|
128 | 128 | p = v.split() |
|
129 | 129 | tm, tz = p[-2:] |
|
130 | 130 | committer = " ".join(p[:-2]) |
|
131 | 131 | if committer[0] == "<": committer = committer[1:-1] |
|
132 | 132 | committer = self.recode(committer) |
|
133 | 133 | if n == "parent": |
|
134 | 134 | parents.append(v) |
|
135 | 135 | |
|
136 | 136 | if committer and committer != author: |
|
137 | 137 | message += "\ncommitter: %s\n" % committer |
|
138 | 138 | tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:] |
|
139 | 139 | tz = -int(tzs) * (int(tzh) * 3600 + int(tzm)) |
|
140 | 140 | date = tm + " " + str(tz) |
|
141 | 141 | |
|
142 | 142 | c = commit(parents=parents, date=date, author=author, desc=message, |
|
143 | 143 | rev=version) |
|
144 | 144 | return c |
|
145 | 145 | |
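
The timezone juggling in getcommit() converts git's "<unixtime> <+/-HHMM>" stamps into Mercurial's "<unixtime> <offset>" form, where the offset is in seconds and positive west of UTC, hence the sign flip. Worked through on a hypothetical stamp:

    tm, tz = '1187596535', '+0200'         # git: unix time, +/-HHMM offset
    tzs, tzh, tzm = tz[-5:-4] + '1', tz[-4:-2], tz[-2:]
    offset = -int(tzs) * (int(tzh) * 3600 + int(tzm))
    assert tm + ' ' + str(offset) == '1187596535 -7200'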
|
146 | 146 | def gettags(self): |
|
147 | 147 | tags = {} |
|
148 | 148 | alltags = {} |
|
149 | 149 | fh = self.gitopen('git ls-remote --tags "%s"' % self.path) |
|
150 | 150 | prefix = 'refs/tags/' |
|
151 | 151 | |
|
152 | 152 | # Build complete list of tags, both annotated and bare ones |
|
153 | 153 | for line in fh: |
|
154 | 154 | line = line.strip() |
|
155 | 155 | node, tag = line.split(None, 1) |
|
156 | 156 | if not tag.startswith(prefix): |
|
157 | 157 | continue |
|
158 | 158 | alltags[tag[len(prefix):]] = node |
|
159 | 159 | if fh.close(): |
|
160 | 160 | raise util.Abort(_('cannot read tags from %s') % self.path) |
|
161 | 161 | |
|
162 | 162 | # Filter out tag objects for annotated tag refs |
|
163 | 163 | for tag in alltags: |
|
164 | 164 | if tag.endswith('^{}'): |
|
165 | 165 | tags[tag[:-3]] = alltags[tag] |
|
166 | 166 | else: |
|
167 | 167 | if tag + '^{}' in alltags: |
|
168 | 168 | continue |
|
169 | 169 | else: |
|
170 | 170 | tags[tag] = alltags[tag] |
|
171 | 171 | |
|
172 | 172 | return tags |
|
173 | 173 | |
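
gettags() prefers the peeled commit for annotated tags: "git ls-remote --tags" lists both the tag object and, under a "^{}" suffix, the commit it points to, and the filtering loop above keeps the latter. With shortened, made-up hashes:

    6a69f92  refs/tags/v1.0        (annotated tag object)
    b7e21dd  refs/tags/v1.0^{}     (peeled commit)

    # -> tags == {'v1.0': 'b7e21dd'}; the bare tag-object entry is
    #    skipped because 'v1.0^{}' is also present.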
|
174 | 174 | def getchangedfiles(self, version, i): |
|
175 | 175 | changes = [] |
|
176 | 176 | if i is None: |
|
177 | 177 | fh = self.gitopen("git diff-tree --root -m -r %s" % version) |
|
178 | 178 | for l in fh: |
|
179 | 179 | if "\t" not in l: |
|
180 | 180 | continue |
|
181 | 181 | m, f = l[:-1].split("\t") |
|
182 | 182 | changes.append(f) |
|
183 | 183 | else: |
|
184 | 184 | fh = self.gitopen('git diff-tree --name-only --root -r %s ' |
|
185 | 185 | '"%s^%s" --' % (version, version, i + 1)) |
|
186 | 186 | changes = [f.rstrip('\n') for f in fh] |
|
187 | 187 | if fh.close(): |
|
188 | 188 | raise util.Abort(_('cannot read changes in %s') % version) |
|
189 | 189 | |
|
190 | 190 | return changes |
|
191 | 191 | |
|
192 | 192 | def getbookmarks(self): |
|
193 | 193 | bookmarks = {} |
|
194 | 194 | |
|
195 | 195 | # Interesting references in git are prefixed |
|
196 | 196 | prefix = 'refs/heads/' |
|
197 | 197 | prefixlen = len(prefix) |
|
198 | 198 | |
|
199 | 199 | # factor two commands |
|
200 | 200 | gitcmd = { 'remote/': 'git ls-remote --heads origin', |
|
201 | 201 | '': 'git show-ref'} |
|
202 | 202 | |
|
203 | 203 | # Origin heads |
|
204 | 204 | for reftype in gitcmd: |
|
205 | 205 | try: |
|
206 | 206 | fh = self.gitopen(gitcmd[reftype], noerr=True) |
|
207 | 207 | for line in fh: |
|
208 | 208 | line = line.strip() |
|
209 | 209 | rev, name = line.split(None, 1) |
|
210 | 210 | if not name.startswith(prefix): |
|
211 | 211 | continue |
|
212 | 212 | name = '%s%s' % (reftype, name[prefixlen:]) |
|
213 | 213 | bookmarks[name] = rev |
|
214 | 214 | except: |
|
215 | 215 | pass |
|
216 | 216 | |
|
217 | 217 | return bookmarks |
@@ -1,395 +1,395 @@
|
1 | 1 | # hg.py - hg backend for convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | # Notes for hg->hg conversion: |
|
9 | 9 | # |
|
10 | 10 | # * Old versions of Mercurial didn't trim the whitespace from the ends |
|
11 | 11 | # of commit messages, but new versions do. Changesets created by |
|
12 | 12 | # those older versions, then converted, may thus have different |
|
13 | 13 | # hashes for changesets that are otherwise identical. |
|
14 | 14 | # |
|
15 | 15 | # * Using "--config convert.hg.saverev=true" will make the source |
|
16 | 16 | # identifier to be stored in the converted revision. This will cause |
|
17 | 17 | # the converted revision to have a different identity than the |
|
18 | 18 | # source. |
|
19 | 19 | |
|
20 | 20 | |
|
21 | 21 | import os, time, cStringIO |
|
22 | 22 | from mercurial.i18n import _ |
|
23 | 23 | from mercurial.node import bin, hex, nullid |
|
24 | 24 | from mercurial import hg, util, context, bookmarks, error |
|
25 | 25 | |
|
26 | 26 | from common import NoRepo, commit, converter_source, converter_sink |
|
27 | 27 | |
|
28 | 28 | class mercurial_sink(converter_sink): |
|
29 | 29 | def __init__(self, ui, path): |
|
30 | 30 | converter_sink.__init__(self, ui, path) |
|
31 | 31 | self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True) |
|
32 | 32 | self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False) |
|
33 | 33 | self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default') |
|
34 | 34 | self.lastbranch = None |
|
35 | 35 | if os.path.isdir(path) and len(os.listdir(path)) > 0: |
|
36 | 36 | try: |
|
37 | 37 | self.repo = hg.repository(self.ui, path) |
|
38 | 38 | if not self.repo.local(): |
|
39 | 39 | raise NoRepo(_('%s is not a local Mercurial repository') |
|
40 | 40 | % path) |
|
41 | 41 | except error.RepoError, err: |
|
42 | 42 | ui.traceback() |
|
43 | 43 | raise NoRepo(err.args[0]) |
|
44 | 44 | else: |
|
45 | 45 | try: |
|
46 | 46 | ui.status(_('initializing destination %s repository\n') % path) |
|
47 | 47 | self.repo = hg.repository(self.ui, path, create=True) |
|
48 | 48 | if not self.repo.local(): |
|
49 | 49 | raise NoRepo(_('%s is not a local Mercurial repository') |
|
50 | 50 | % path) |
|
51 | 51 | self.created.append(path) |
|
52 | 52 | except error.RepoError: |
|
53 | 53 | ui.traceback() |
|
54 | 54 | raise NoRepo(_("could not create hg repository %s as sink") |
|
55 | 55 | % path) |
|
56 | 56 | self.lock = None |
|
57 | 57 | self.wlock = None |
|
58 | 58 | self.filemapmode = False |
|
59 | 59 | |
|
60 | 60 | def before(self): |
|
61 | 61 | self.ui.debug('run hg sink pre-conversion action\n') |
|
62 | 62 | self.wlock = self.repo.wlock() |
|
63 | 63 | self.lock = self.repo.lock() |
|
64 | 64 | |
|
65 | 65 | def after(self): |
|
66 | 66 | self.ui.debug('run hg sink post-conversion action\n') |
|
67 | 67 | if self.lock: |
|
68 | 68 | self.lock.release() |
|
69 | 69 | if self.wlock: |
|
70 | 70 | self.wlock.release() |
|
71 | 71 | |
|
72 | 72 | def revmapfile(self): |
|
73 | 73 | return self.repo.join("shamap") |
|
74 | 74 | |
|
75 | 75 | def authorfile(self): |
|
76 | 76 | return self.repo.join("authormap") |
|
77 | 77 | |
|
78 | 78 | def getheads(self): |
|
79 | 79 | h = self.repo.changelog.heads() |
|
80 | 80 | return [hex(x) for x in h] |
|
81 | 81 | |
|
82 | 82 | def setbranch(self, branch, pbranches): |
|
83 | 83 | if not self.clonebranches: |
|
84 | 84 | return |
|
85 | 85 | |
|
86 | 86 | setbranch = (branch != self.lastbranch) |
|
87 | 87 | self.lastbranch = branch |
|
88 | 88 | if not branch: |
|
89 | 89 | branch = 'default' |
|
90 | 90 | pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches] |
|
91 | 91 | pbranch = pbranches and pbranches[0][1] or 'default' |
|
92 | 92 | |
|
93 | 93 | branchpath = os.path.join(self.path, branch) |
|
94 | 94 | if setbranch: |
|
95 | 95 | self.after() |
|
96 | 96 | try: |
|
97 | 97 | self.repo = hg.repository(self.ui, branchpath) |
|
98 | 98 | except: |
|
99 | 99 | self.repo = hg.repository(self.ui, branchpath, create=True) |
|
100 | 100 | self.before() |
|
101 | 101 | |
|
102 | 102 | # pbranches may bring revisions from other branches (merge parents) |
|
103 | 103 | # Make sure we have them, or pull them. |
|
104 | 104 | missings = {} |
|
105 | 105 | for b in pbranches: |
|
106 | 106 | try: |
|
107 | 107 | self.repo.lookup(b[0]) |
|
108 | 108 | except: |
|
109 | 109 | missings.setdefault(b[1], []).append(b[0]) |
|
110 | 110 | |
|
111 | 111 | if missings: |
|
112 | 112 | self.after() |
|
113 | 113 | for pbranch, heads in missings.iteritems(): |
|
114 | 114 | pbranchpath = os.path.join(self.path, pbranch) |
|
115 | 115 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
116 | 116 | self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch)) |
|
117 | 117 | self.repo.pull(prepo, [prepo.lookup(h) for h in heads]) |
|
118 | 118 | self.before() |
|
119 | 119 | |
|
120 | 120 | def _rewritetags(self, source, revmap, data): |
|
121 | 121 | fp = cStringIO.StringIO() |
|
122 | 122 | for line in data.splitlines(): |
|
123 | 123 | s = line.split(' ', 1) |
|
124 | 124 | if len(s) != 2: |
|
125 | 125 | continue |
|
126 | 126 | revid = revmap.get(source.lookuprev(s[0])) |
|
127 | 127 | if not revid: |
|
128 | 128 | continue |
|
129 | 129 | fp.write('%s %s\n' % (revid, s[1])) |
|
130 | 130 | return fp.getvalue() |
|
131 | 131 | |
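
_rewritetags() translates a source repository's .hgtags content into sink identifiers: each line is "node tag", the node goes through source.lookuprev() and the revmap, and lines whose node has no converted counterpart are silently dropped. For instance (ids hypothetical, comments ours):

    d0243f0a5b94 v1.0    # source .hgtags line
    89fe76512aa4 v1.0    # same line after revmap translation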
|
132 | 132 | def putcommit(self, files, copies, parents, commit, source, revmap): |
|
133 | 133 | |
|
134 | 134 | files = dict(files) |
|
135 | 135 | def getfilectx(repo, memctx, f): |
|
136 | 136 | v = files[f] |
|
137 | 137 | data, mode = source.getfile(f, v) |
|
138 | 138 | if f == '.hgtags': |
|
139 | 139 | data = self._rewritetags(source, revmap, data) |
|
140 | 140 | return context.memfilectx(f, data, 'l' in mode, 'x' in mode, |
|
141 | 141 | copies.get(f)) |
|
142 | 142 | |
|
143 | 143 | pl = [] |
|
144 | 144 | for p in parents: |
|
145 | 145 | if p not in pl: |
|
146 | 146 | pl.append(p) |
|
147 | 147 | parents = pl |
|
148 | 148 | nparents = len(parents) |
|
149 | 149 | if self.filemapmode and nparents == 1: |
|
150 | 150 | m1node = self.repo.changelog.read(bin(parents[0]))[0] |
|
151 | 151 | parent = parents[0] |
|
152 | 152 | |
|
153 | 153 | if len(parents) < 2: |
|
154 | 154 | parents.append(nullid) |
|
155 | 155 | if len(parents) < 2: |
|
156 | 156 | parents.append(nullid) |
|
157 | 157 | p2 = parents.pop(0) |
|
158 | 158 | |
|
159 | 159 | text = commit.desc |
|
160 | 160 | extra = commit.extra.copy() |
|
161 | 161 | if self.branchnames and commit.branch: |
|
162 | 162 | extra['branch'] = commit.branch |
|
163 | 163 | if commit.rev: |
|
164 | 164 | extra['convert_revision'] = commit.rev |
|
165 | 165 | |
|
166 | 166 | while parents: |
|
167 | 167 | p1 = p2 |
|
168 | 168 | p2 = parents.pop(0) |
|
169 | 169 | ctx = context.memctx(self.repo, (p1, p2), text, files.keys(), |
|
170 | 170 | getfilectx, commit.author, commit.date, extra) |
|
171 | 171 | self.repo.commitctx(ctx) |
|
172 | 172 | text = "(octopus merge fixup)\n" |
|
173 | 173 | p2 = hex(self.repo.changelog.tip()) |
|
174 | 174 | |
|
175 | 175 | if self.filemapmode and nparents == 1: |
|
176 | 176 | man = self.repo.manifest |
|
177 | 177 | mnode = self.repo.changelog.read(bin(p2))[0] |
|
178 | 178 | closed = 'close' in commit.extra |
|
179 | 179 | if not closed and not man.cmp(m1node, man.revision(mnode)): |
|
180 | 180 | self.ui.status(_("filtering out empty revision\n")) |
|
181 | 181 | self.repo.rollback(force=True) |
|
182 | 182 | return parent |
|
183 | 183 | return p2 |
|
184 | 184 | |
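
Since Mercurial changesets carry at most two parents, the while loop in putcommit() folds an octopus merge into a chain of two-parent commits: the first memctx merges the first two parents under the real description, and every remaining parent is merged against the running tip under the "(octopus merge fixup)" message. An illustrative trace, not repository code:

    parents = ['a', 'b', 'c']   # three-parent source commit (ids made up)
    # 1st memctx: parents ('a', 'b'), desc = original message
    # 2nd memctx: parents (tip, 'c'), desc = '(octopus merge fixup)\n'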
|
185 | 185 | def puttags(self, tags): |
|
186 | 186 | try: |
|
187 | 187 | parentctx = self.repo[self.tagsbranch] |
|
188 | 188 | tagparent = parentctx.node() |
|
189 | 189 | except error.RepoError: |
|
190 | 190 | parentctx = None |
|
191 | 191 | tagparent = nullid |
|
192 | 192 | |
|
193 | 193 | try: |
|
194 | 194 | oldlines = sorted(parentctx['.hgtags'].data().splitlines(True)) |
|
195 | 195 | except: |
|
196 | 196 | oldlines = [] |
|
197 | 197 | |
|
198 | 198 | newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags]) |
|
199 | 199 | if newlines == oldlines: |
|
200 | 200 | return None, None |
|
201 | 201 | data = "".join(newlines) |
|
202 | 202 | def getfilectx(repo, memctx, f): |
|
203 | 203 | return context.memfilectx(f, data, False, False, None) |
|
204 | 204 | |
|
205 | 205 | self.ui.status(_("updating tags\n")) |
|
206 | 206 | date = "%s 0" % int(time.mktime(time.gmtime())) |
|
207 | 207 | extra = {'branch': self.tagsbranch} |
|
208 | 208 | ctx = context.memctx(self.repo, (tagparent, None), "update tags", |
|
209 | 209 | [".hgtags"], getfilectx, "convert-repo", date, |
|
210 | 210 | extra) |
|
211 | 211 | self.repo.commitctx(ctx) |
|
212 | 212 | return hex(self.repo.changelog.tip()), hex(tagparent) |
|
213 | 213 | |
|
214 | 214 | def setfilemapmode(self, active): |
|
215 | 215 | self.filemapmode = active |
|
216 | 216 | |
|
217 | 217 | def putbookmarks(self, updatedbookmark): |
|
218 | 218 | if not len(updatedbookmark): |
|
219 | 219 | return |
|
220 | 220 | |
|
221 | 221 | self.ui.status(_("updating bookmarks\n")) |
|
222 | 222 | for bookmark in updatedbookmark: |
|
223 | 223 | self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark]) |
|
224 | 224 | bookmarks.write(self.repo) |
|
225 | 225 | |
|
226 | 226 | def hascommit(self, rev): |
|
227 | 227 | if rev not in self.repo and self.clonebranches: |
|
228 | 228 | raise util.Abort(_('revision %s not found in destination ' |
|
229 | 229 | 'repository (lookups with clonebranches=true ' |
|
230 | 230 | 'are not implemented)') % rev) |
|
231 | 231 | return rev in self.repo |
|
232 | 232 | |
|
233 | 233 | class mercurial_source(converter_source): |
|
234 | 234 | def __init__(self, ui, path, rev=None): |
|
235 | 235 | converter_source.__init__(self, ui, path, rev) |
|
236 | 236 | self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False) |
|
237 | 237 | self.ignored = set() |
|
238 | 238 | self.saverev = ui.configbool('convert', 'hg.saverev', False) |
|
239 | 239 | try: |
|
240 | 240 | self.repo = hg.repository(self.ui, path) |
|
241 | 241 | # try to provoke an exception if this isn't really a hg |
|
242 | 242 | # repo, but some other bogus compatible-looking url |
|
243 | 243 | if not self.repo.local(): |
|
244 | | raise error.RepoError |
|
| 244 | raise error.RepoError |
|
245 | 245 | except error.RepoError: |
|
246 | 246 | ui.traceback() |
|
247 | 247 | raise NoRepo(_("%s is not a local Mercurial repository") % path) |
|
248 | 248 | self.lastrev = None |
|
249 | 249 | self.lastctx = None |
|
250 | 250 | self._changescache = None |
|
251 | 251 | self.convertfp = None |
|
252 | 252 | # Restrict converted revisions to startrev descendants |
|
253 | 253 | startnode = ui.config('convert', 'hg.startrev') |
|
254 | 254 | if startnode is not None: |
|
255 | 255 | try: |
|
256 | 256 | startnode = self.repo.lookup(startnode) |
|
257 | 257 | except error.RepoError: |
|
258 | 258 | raise util.Abort(_('%s is not a valid start revision') |
|
259 | 259 | % startnode) |
|
260 | 260 | startrev = self.repo.changelog.rev(startnode) |
|
261 | 261 | children = {startnode: 1} |
|
262 | 262 | for rev in self.repo.changelog.descendants(startrev): |
|
263 | 263 | children[self.repo.changelog.node(rev)] = 1 |
|
264 | 264 | self.keep = children.__contains__ |
|
265 | 265 | else: |
|
266 | 266 | self.keep = util.always |
|
267 | 267 | |
|
268 | 268 | def changectx(self, rev): |
|
269 | 269 | if self.lastrev != rev: |
|
270 | 270 | self.lastctx = self.repo[rev] |
|
271 | 271 | self.lastrev = rev |
|
272 | 272 | return self.lastctx |
|
273 | 273 | |
|
274 | 274 | def parents(self, ctx): |
|
275 | 275 | return [p for p in ctx.parents() if p and self.keep(p.node())] |
|
276 | 276 | |
|
277 | 277 | def getheads(self): |
|
278 | 278 | if self.rev: |
|
279 | 279 | heads = [self.repo[self.rev].node()] |
|
280 | 280 | else: |
|
281 | 281 | heads = self.repo.heads() |
|
282 | 282 | return [hex(h) for h in heads if self.keep(h)] |
|
283 | 283 | |
|
284 | 284 | def getfile(self, name, rev): |
|
285 | 285 | try: |
|
286 | 286 | fctx = self.changectx(rev)[name] |
|
287 | 287 | return fctx.data(), fctx.flags() |
|
288 | 288 | except error.LookupError, err: |
|
289 | 289 | raise IOError(err) |
|
290 | 290 | |
|
291 | 291 | def getchanges(self, rev): |
|
292 | 292 | ctx = self.changectx(rev) |
|
293 | 293 | parents = self.parents(ctx) |
|
294 | 294 | if not parents: |
|
295 | 295 | files = sorted(ctx.manifest()) |
|
296 | 296 | # getcopies() is not needed for roots, but it is a simple way to |
|
297 | 297 | # detect missing revlogs and abort on errors or populate |
|
298 | 298 | # self.ignored |
|
299 | 299 | self.getcopies(ctx, parents, files) |
|
300 | 300 | return [(f, rev) for f in files if f not in self.ignored], {} |
|
301 | 301 | if self._changescache and self._changescache[0] == rev: |
|
302 | 302 | m, a, r = self._changescache[1] |
|
303 | 303 | else: |
|
304 | 304 | m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3] |
|
305 | 305 | # getcopies() detects missing revlogs early, run it before |
|
306 | 306 | # filtering the changes. |
|
307 | 307 | copies = self.getcopies(ctx, parents, m + a) |
|
308 | 308 | changes = [(name, rev) for name in m + a + r |
|
309 | 309 | if name not in self.ignored] |
|
310 | 310 | return sorted(changes), copies |
|
311 | 311 | |
|
312 | 312 | def getcopies(self, ctx, parents, files): |
|
313 | 313 | copies = {} |
|
314 | 314 | for name in files: |
|
315 | 315 | if name in self.ignored: |
|
316 | 316 | continue |
|
317 | 317 | try: |
|
318 | 318 | copysource, copynode = ctx.filectx(name).renamed() |
|
319 | 319 | if copysource in self.ignored or not self.keep(copynode): |
|
320 | 320 | continue |
|
321 | 321 | # Ignore copy sources not in parent revisions |
|
322 | 322 | found = False |
|
323 | 323 | for p in parents: |
|
324 | 324 | if copysource in p: |
|
325 | 325 | found = True |
|
326 | 326 | break |
|
327 | 327 | if not found: |
|
328 | 328 | continue |
|
329 | 329 | copies[name] = copysource |
|
330 | 330 | except TypeError: |
|
331 | 331 | pass |
|
332 | 332 | except error.LookupError, e: |
|
333 | 333 | if not self.ignoreerrors: |
|
334 | 334 | raise |
|
335 | 335 | self.ignored.add(name) |
|
336 | 336 | self.ui.warn(_('ignoring: %s\n') % e) |
|
337 | 337 | return copies |
|
338 | 338 | |
|
339 | 339 | def getcommit(self, rev): |
|
340 | 340 | ctx = self.changectx(rev) |
|
341 | 341 | parents = [p.hex() for p in self.parents(ctx)] |
|
342 | 342 | if self.saverev: |
|
343 | 343 | crev = rev |
|
344 | 344 | else: |
|
345 | 345 | crev = None |
|
346 | 346 | return commit(author=ctx.user(), |
|
347 | 347 | date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'), |
|
348 | 348 | desc=ctx.description(), rev=crev, parents=parents, |
|
349 | 349 | branch=ctx.branch(), extra=ctx.extra(), |
|
350 | 350 | sortkey=ctx.rev()) |
|
351 | 351 | |
|
352 | 352 | def gettags(self): |
|
353 | 353 | tags = [t for t in self.repo.tagslist() if t[0] != 'tip'] |
|
354 | 354 | return dict([(name, hex(node)) for name, node in tags |
|
355 | 355 | if self.keep(node)]) |
|
356 | 356 | |
|
357 | 357 | def getchangedfiles(self, rev, i): |
|
358 | 358 | ctx = self.changectx(rev) |
|
359 | 359 | parents = self.parents(ctx) |
|
360 | 360 | if not parents and i is None: |
|
361 | 361 | i = 0 |
|
362 | 362 | changes = [], ctx.manifest().keys(), [] |
|
363 | 363 | else: |
|
364 | 364 | i = i or 0 |
|
365 | 365 | changes = self.repo.status(parents[i].node(), ctx.node())[:3] |
|
366 | 366 | changes = [[f for f in l if f not in self.ignored] for l in changes] |
|
367 | 367 | |
|
368 | 368 | if i == 0: |
|
369 | 369 | self._changescache = (rev, changes) |
|
370 | 370 | |
|
371 | 371 | return changes[0] + changes[1] + changes[2] |
|
372 | 372 | |
|
373 | 373 | def converted(self, rev, destrev): |
|
374 | 374 | if self.convertfp is None: |
|
375 | 375 | self.convertfp = open(self.repo.join('shamap'), 'a') |
|
376 | 376 | self.convertfp.write('%s %s\n' % (destrev, rev)) |
|
377 | 377 | self.convertfp.flush() |
|
378 | 378 | |
|
379 | 379 | def before(self): |
|
380 | 380 | self.ui.debug('run hg source pre-conversion action\n') |
|
381 | 381 | |
|
382 | 382 | def after(self): |
|
383 | 383 | self.ui.debug('run hg source post-conversion action\n') |
|
384 | 384 | |
|
385 | 385 | def hasnativeorder(self): |
|
386 | 386 | return True |
|
387 | 387 | |
|
388 | 388 | def lookuprev(self, rev): |
|
389 | 389 | try: |
|
390 | 390 | return hex(self.repo.lookup(rev)) |
|
391 | 391 | except error.RepoError: |
|
392 | 392 | return None |
|
393 | 393 | |
|
394 | 394 | def getbookmarks(self): |
|
395 | 395 | return bookmarks.listbookmarks(self.repo) |
@@ -1,360 +1,360 @@
|
1 | 1 | # monotone.py - monotone support for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and |
|
4 | 4 | # others |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | import os, re |
|
10 | 10 | from mercurial import util |
|
11 | 11 | from common import NoRepo, commit, converter_source, checktool |
|
12 | 12 | from common import commandline |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | 14 | |
|
15 | 15 | class monotone_source(converter_source, commandline): |
|
16 | 16 | def __init__(self, ui, path=None, rev=None): |
|
17 | 17 | converter_source.__init__(self, ui, path, rev) |
|
18 | 18 | commandline.__init__(self, ui, 'mtn') |
|
19 | 19 | |
|
20 | 20 | self.ui = ui |
|
21 | 21 | self.path = path |
|
22 | 22 | self.automatestdio = False |
|
23 | 23 | self.rev = rev |
|
24 | 24 | |
|
25 | 25 | norepo = NoRepo(_("%s does not look like a monotone repository") |
|
26 | 26 | % path) |
|
27 | 27 | if not os.path.exists(os.path.join(path, '_MTN')): |
|
28 | 28 | # Could be a monotone repository (SQLite db file) |
|
29 | 29 | try: |
|
30 | 30 | f = file(path, 'rb') |
|
31 | 31 | header = f.read(16) |
|
32 | 32 | f.close() |
|
33 | 33 | except: |
|
34 | 34 | header = '' |
|
35 | 35 | if header != 'SQLite format 3\x00': |
|
36 | 36 | raise norepo |
|
37 | 37 | |
|
38 | 38 | # regular expressions for parsing monotone output |
|
39 | 39 | space = r'\s*' |
|
40 | 40 | name = r'\s+"((?:\\"|[^"])*)"\s*' |
|
41 | 41 | value = name |
|
42 | 42 | revision = r'\s+\[(\w+)\]\s*' |
|
43 | 43 | lines = r'(?:.|\n)+' |
|
44 | 44 | |
|
45 | 45 | self.dir_re = re.compile(space + "dir" + name) |
|
46 | 46 | self.file_re = re.compile(space + "file" + name + |
|
47 | 47 | "content" + revision) |
|
48 | 48 | self.add_file_re = re.compile(space + "add_file" + name + |
|
49 | 49 | "content" + revision) |
|
50 | 50 | self.patch_re = re.compile(space + "patch" + name + |
|
51 | 51 | "from" + revision + "to" + revision) |
|
52 | 52 | self.rename_re = re.compile(space + "rename" + name + "to" + name) |
|
53 | 53 | self.delete_re = re.compile(space + "delete" + name) |
|
54 | 54 | self.tag_re = re.compile(space + "tag" + name + "revision" + |
|
55 | 55 | revision) |
|
56 | 56 | self.cert_re = re.compile(lines + space + "name" + name + |
|
57 | 57 | "value" + value) |
|
58 | 58 | |
|
59 | 59 | attr = space + "file" + lines + space + "attr" + space |
|
60 | 60 | self.attr_execute_re = re.compile(attr + '"mtn:execute"' + |
|
61 | 61 | space + '"true"') |
|
62 | 62 | |
|
63 | 63 | # cached data |
|
64 | 64 | self.manifest_rev = None |
|
65 | 65 | self.manifest = None |
|
66 | 66 | self.files = None |
|
67 | 67 | self.dirs = None |
|
68 | 68 | |
|
69 | 69 | checktool('mtn', abort=False) |
|
70 | 70 | |
|
71 | 71 | def mtnrun(self, *args, **kwargs): |
|
72 | 72 | if self.automatestdio: |
|
73 | 73 | return self.mtnrunstdio(*args, **kwargs) |
|
74 | 74 | else: |
|
75 | 75 | return self.mtnrunsingle(*args, **kwargs) |
|
76 | 76 | |
|
77 | 77 | def mtnrunsingle(self, *args, **kwargs): |
|
78 | 78 | kwargs['d'] = self.path |
|
79 | 79 | return self.run0('automate', *args, **kwargs) |
|
80 | 80 | |
|
81 | 81 | def mtnrunstdio(self, *args, **kwargs): |
|
82 | 82 | # Prepare the command in automate stdio format |
|
83 | 83 | command = [] |
|
84 | 84 | for k, v in kwargs.iteritems(): |
|
85 | 85 | command.append("%s:%s" % (len(k), k)) |
|
86 | 86 | if v: |
|
87 | 87 | command.append("%s:%s" % (len(v), v)) |
|
88 | 88 | if command: |
|
89 | 89 | command.insert(0, 'o') |
|
90 | 90 | command.append('e') |
|
91 | 91 | |
|
92 | 92 | command.append('l') |
|
93 | 93 | for arg in args: |
|
94 | 94 | command += "%s:%s" % (len(arg), arg) |
|
95 | 95 | command.append('e') |
|
96 | 96 | command = ''.join(command) |
|
97 | 97 | |
|
98 | 98 | self.ui.debug("mtn: sending '%s'\n" % command) |
|
99 | 99 | self.mtnwritefp.write(command) |
|
100 | 100 | self.mtnwritefp.flush() |
|
101 | 101 | |
|
102 | 102 | return self.mtnstdioreadcommandoutput(command) |
|
103 | 103 | |
|
104 | 104 | def mtnstdioreadpacket(self): |
|
105 | 105 | read = None |
|
106 | 106 | commandnbr = '' |
|
107 | 107 | while read != ':': |
|
108 | 108 | read = self.mtnreadfp.read(1) |
|
109 | 109 | if not read: |
|
110 | 110 | raise util.Abort(_('bad mtn packet - no end of commandnbr')) |
|
111 | 111 | commandnbr += read |
|
112 | 112 | commandnbr = commandnbr[:-1] |
|
113 | 113 | |
|
114 | 114 | stream = self.mtnreadfp.read(1) |
|
115 | 115 | if stream not in 'mewptl': |
|
116 | 116 | raise util.Abort(_('bad mtn packet - bad stream type %s') % stream) |
|
117 | 117 | |
|
118 | 118 | read = self.mtnreadfp.read(1) |
|
119 | 119 | if read != ':': |
|
120 | 120 | raise util.Abort(_('bad mtn packet - no divider before size')) |
|
121 | 121 | |
|
122 | 122 | read = None |
|
123 | 123 | lengthstr = '' |
|
124 | 124 | while read != ':': |
|
125 | 125 | read = self.mtnreadfp.read(1) |
|
126 | 126 | if not read: |
|
127 | 127 | raise util.Abort(_('bad mtn packet - no end of packet size')) |
|
128 | 128 | lengthstr += read |
|
129 | 129 | try: |
|
130 | 130 | length = long(lengthstr[:-1]) |
|
131 | 131 | except TypeError: |
|
132 | 132 | raise util.Abort(_('bad mtn packet - bad packet size %s') |
|
133 | 133 | % lengthstr) |
|
134 | 134 | |
|
135 | 135 | read = self.mtnreadfp.read(length) |
|
136 | 136 | if len(read) != length: |
|
137 | 137 | raise util.Abort(_("bad mtn packet - unable to read full packet " |
|
138 | 138 | "read %s of %s") % (len(read), length)) |
|
139 | 139 | |
|
140 | 140 | return (commandnbr, stream, length, read) |
|
141 | 141 | |
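
mtnstdioreadpacket() consumes monotone's automate stdio framing, "<commandnbr>:<stream>:<size>:<payload>", where the stream byte is 'm' for main output, 'e'/'w' for errors and warnings, 'p' for progress, and 'l' for the final packet carrying the exit code ('t', also accepted above, is ignored). Decoding one hand-made frame (payload invented):

    packet = '0:m:13:hello, world!'
    nbr, rest = packet.split(':', 1)
    stream, _, rest = rest.partition(':')
    size, _, payload = rest.partition(':')
    assert (nbr, stream, int(size), payload) == ('0', 'm', 13, 'hello, world!')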
|
142 | 142 | def mtnstdioreadcommandoutput(self, command): |
|
143 | 143 | retval = [] |
|
144 | 144 | while True: |
|
145 | 145 | commandnbr, stream, length, output = self.mtnstdioreadpacket() |
|
146 | 146 | self.ui.debug('mtn: read packet %s:%s:%s\n' % |
|
147 | 147 | (commandnbr, stream, length)) |
|
148 | 148 | |
|
149 | 149 | if stream == 'l': |
|
150 | 150 | # End of command |
|
151 | 151 | if output != '0': |
|
152 | 152 | raise util.Abort(_("mtn command '%s' returned %s") % |
|
153 | 153 | (command, output)) |
|
154 | 154 | break |
|
155 | 155 | elif stream in 'ew': |
|
156 | 156 | # Error, warning output |
|
157 | 157 | self.ui.warn(_('%s error:\n') % self.command) |
|
158 | 158 | self.ui.warn(output) |
|
159 | 159 | elif stream == 'p': |
|
160 | 160 | # Progress messages |
|
161 | 161 | self.ui.debug('mtn: ' + output) |
|
162 | 162 | elif stream == 'm': |
|
163 | 163 | # Main stream - command output |
|
164 | 164 | retval.append(output) |
|
165 | 165 | |
|
166 | 166 | return ''.join(retval) |
|
167 | 167 | |
|
168 | 168 | def mtnloadmanifest(self, rev): |
|
169 | 169 | if self.manifest_rev == rev: |
|
170 | 170 | return |
|
171 | 171 | self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n") |
|
172 | 172 | self.manifest_rev = rev |
|
173 | 173 | self.files = {} |
|
174 | 174 | self.dirs = {} |
|
175 | 175 | |
|
176 | 176 | for e in self.manifest: |
|
177 | 177 | m = self.file_re.match(e) |
|
178 | 178 | if m: |
|
179 | 179 | attr = "" |
|
180 | 180 | name = m.group(1) |
|
181 | 181 | node = m.group(2) |
|
182 | 182 | if self.attr_execute_re.match(e): |
|
183 | 183 | attr += "x" |
|
184 | 184 | self.files[name] = (node, attr) |
|
185 | 185 | m = self.dir_re.match(e) |
|
186 | 186 | if m: |
|
187 | 187 | self.dirs[m.group(1)] = True |
|
188 | 188 | |
|
189 | 189 | def mtnisfile(self, name, rev): |
|
190 | 190 | # a non-file could be a directory or a deleted or renamed file |
|
191 | 191 | self.mtnloadmanifest(rev) |
|
192 | 192 | return name in self.files |
|
193 | 193 | |
|
194 | 194 | def mtnisdir(self, name, rev): |
|
195 | 195 | self.mtnloadmanifest(rev) |
|
196 | 196 | return name in self.dirs |
|
197 | 197 | |
|
198 | 198 | def mtngetcerts(self, rev): |
|
199 | 199 | certs = {"author":"<missing>", "date":"<missing>", |
|
200 | 200 | "changelog":"<missing>", "branch":"<missing>"} |
|
201 | 201 | certlist = self.mtnrun("certs", rev) |
|
202 | 202 | # mtn < 0.45: |
|
203 | 203 | # key "test@selenic.com" |
|
204 | 204 | # mtn >= 0.45: |
|
205 | 205 | # key [ff58a7ffb771907c4ff68995eada1c4da068d328] |
|
206 | 206 | certlist = re.split('\n\n key ["\[]', certlist) |
|
207 | 207 | for e in certlist: |
|
208 | 208 | m = self.cert_re.match(e) |
|
209 | 209 | if m: |
|
210 | 210 | name, value = m.groups() |
|
211 | 211 | value = value.replace(r'\"', '"') |
|
212 | 212 | value = value.replace(r'\\', '\\') |
|
213 | 213 | certs[name] = value |
|
214 | 214 | # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306 |
|
215 | 215 | # and all times are stored in UTC |
|
216 | 216 | certs["date"] = certs["date"].split('.')[0] + " UTC" |
|
217 | 217 | return certs |
|
218 | 218 | |
|
219 | 219 | # implement the converter_source interface: |
|
220 | 220 | |
|
221 | 221 | def getheads(self): |
|
222 | 222 | if not self.rev: |
|
223 | 223 | return self.mtnrun("leaves").splitlines() |
|
224 | 224 | else: |
|
225 | 225 | return [self.rev] |
|
226 | 226 | |
|
227 | 227 | def getchanges(self, rev): |
|
228 | 228 | #revision = self.mtncmd("get_revision %s" % rev).split("\n\n") |
|
229 | 229 | revision = self.mtnrun("get_revision", rev).split("\n\n") |
|
230 | 230 | files = {} |
|
231 | 231 | ignoremove = {} |
|
232 | 232 | renameddirs = [] |
|
233 | 233 | copies = {} |
|
234 | 234 | for e in revision: |
|
235 | 235 | m = self.add_file_re.match(e) |
|
236 | 236 | if m: |
|
237 | 237 | files[m.group(1)] = rev |
|
238 | 238 | ignoremove[m.group(1)] = rev |
|
239 | 239 | m = self.patch_re.match(e) |
|
240 | 240 | if m: |
|
241 | 241 | files[m.group(1)] = rev |
|
242 | 242 | # Delete/rename is handled later when the convert engine |
|
243 | 243 | # discovers an IOError exception from getfile, |
|
244 | 244 | # but only if we add the "from" file to the list of changes. |
|
245 | 245 | m = self.delete_re.match(e) |
|
246 | 246 | if m: |
|
247 | 247 | files[m.group(1)] = rev |
|
248 | 248 | m = self.rename_re.match(e) |
|
249 | 249 | if m: |
|
250 | 250 | toname = m.group(2) |
|
251 | 251 | fromname = m.group(1) |
|
252 | 252 | if self.mtnisfile(toname, rev): |
|
253 | 253 | ignoremove[toname] = 1 |
|
254 | 254 | copies[toname] = fromname |
|
255 | 255 | files[toname] = rev |
|
256 | 256 | files[fromname] = rev |
|
257 | 257 | elif self.mtnisdir(toname, rev): |
|
258 | 258 | renameddirs.append((fromname, toname)) |
|
259 | 259 | |
|
260 | 260 | # Directory renames can be handled only once we have recorded |
|
261 | 261 | # all new files |
|
262 | 262 | for fromdir, todir in renameddirs: |
|
263 | 263 | renamed = {} |
|
264 | 264 | for tofile in self.files: |
|
265 | 265 | if tofile in ignoremove: |
|
266 | 266 | continue |
|
267 | 267 | if tofile.startswith(todir + '/'): |
|
268 | 268 | renamed[tofile] = fromdir + tofile[len(todir):] |
|
269 | 269 | # Avoid chained moves like: |
|
270 | 270 | # d1(/a) => d3/d1(/a) |
|
271 | 271 | # d2 => d3 |
|
272 | 272 | ignoremove[tofile] = 1 |
|
273 | 273 | for tofile, fromfile in renamed.items(): |
|
274 | 274 | self.ui.debug (_("copying file in renamed directory " |
|
275 | 275 | "from '%s' to '%s'") |
|
276 | 276 | % (fromfile, tofile), '\n') |
|
277 | 277 | files[tofile] = rev |
|
278 | 278 | copies[tofile] = fromfile |
|
279 | 279 | for fromfile in renamed.values(): |
|
280 | 280 | files[fromfile] = rev |
|
281 | 281 | |
|
282 | 282 | return (files.items(), copies) |
|
283 | 283 | |
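# Editor's sketch, not part of the changeset: a standalone model of the
# directory-rename expansion above, showing why every expanded target is
# added to 'ignoremove'. With two renames in one revision,
#     d1 -> d3/d1    and    d2 -> d3,
# expanding 'd2 -> d3' must not remap 'd3/d1/a', which already received
# its copy source from the first rename. All names are hypothetical.
def expandrenames(renameddirs, tofiles):
    copies, ignoremove = {}, set()
    for fromdir, todir in renameddirs:
        for tofile in tofiles:
            if tofile in ignoremove:
                continue
            if tofile.startswith(todir + '/'):
                copies[tofile] = fromdir + tofile[len(todir):]
                ignoremove.add(tofile)
    return copies

assert expandrenames([('d1', 'd3/d1'), ('d2', 'd3')],
                     ['d3/d1/a']) == {'d3/d1/a': 'd1/a'}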
|
284 | 284 | def getfile(self, name, rev): |
|
285 | 285 | if not self.mtnisfile(name, rev): |
|
286 | | raise IOError |

| 286 | raise IOError # file was deleted or renamed |
|
287 | 287 | try: |
|
288 | 288 | data = self.mtnrun("get_file_of", name, r=rev) |
|
289 | 289 | except: |
|
290 | | raise IOError |

| 290 | raise IOError # file was deleted or renamed |
|
291 | 291 | self.mtnloadmanifest(rev) |
|
292 | 292 | node, attr = self.files.get(name, (None, "")) |
|
293 | 293 | return data, attr |
|
294 | 294 | |
|
295 | 295 | def getcommit(self, rev): |
|
296 | 296 | extra = {} |
|
297 | 297 | certs = self.mtngetcerts(rev) |
|
298 | 298 | if certs.get('suspend') == certs["branch"]: |
|
299 | 299 | extra['close'] = '1' |
|
300 | 300 | return commit( |
|
301 | 301 | author=certs["author"], |
|
302 | 302 | date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")), |
|
303 | 303 | desc=certs["changelog"], |
|
304 | 304 | rev=rev, |
|
305 | 305 | parents=self.mtnrun("parents", rev).splitlines(), |
|
306 | 306 | branch=certs["branch"], |
|
307 | 307 | extra=extra) |
|
308 | 308 | |
|
309 | 309 | def gettags(self): |
|
310 | 310 | tags = {} |
|
311 | 311 | for e in self.mtnrun("tags").split("\n\n"): |
|
312 | 312 | m = self.tag_re.match(e) |
|
313 | 313 | if m: |
|
314 | 314 | tags[m.group(1)] = m.group(2) |
|
315 | 315 | return tags |
|
316 | 316 | |
|
317 | 317 | def getchangedfiles(self, rev, i): |
|
318 | 318 | # This function is only needed to support --filemap |
|
319 | 319 | # ... and we don't support that |
|
320 | | raise NotImplementedError |

| 320 | raise NotImplementedError |
|
321 | 321 | |
|
322 | 322 | def before(self): |
|
323 | 323 | # Check if we have a new enough version to use automate stdio |
|
324 | 324 | version = 0.0 |
|
325 | 325 | try: |
|
326 | 326 | versionstr = self.mtnrunsingle("interface_version") |
|
327 | 327 | version = float(versionstr) |
|
328 | 328 | except Exception: |
|
329 | 329 | raise util.Abort(_("unable to determine mtn automate interface " |
|
330 | 330 | "version")) |
|
331 | 331 | |
|
332 | 332 | if version >= 12.0: |
|
333 | 333 | self.automatestdio = True |
|
334 | 334 | self.ui.debug("mtn automate version %s - using automate stdio\n" % |
|
335 | 335 | version) |
|
336 | 336 | |
|
337 | 337 | # launch the long-running automate stdio process |
|
338 | 338 | self.mtnwritefp, self.mtnreadfp = self._run2('automate', 'stdio', |
|
339 | 339 | '-d', self.path) |
|
340 | 340 | # read the headers |
|
341 | 341 | read = self.mtnreadfp.readline() |
|
342 | 342 | if read != 'format-version: 2\n': |
|
343 | 343 | raise util.Abort(_('mtn automate stdio header unexpected: %s') |
|
344 | 344 | % read) |
|
345 | 345 | while read != '\n': |
|
346 | 346 | read = self.mtnreadfp.readline() |
|
347 | 347 | if not read: |
|
348 | 348 | raise util.Abort(_("failed to reach end of mtn automate " |
|
349 | 349 | "stdio headers")) |
|
350 | 350 | else: |
|
351 | 351 | self.ui.debug("mtn automate version %s - not using automate stdio " |
|
352 | 352 | "(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version) |
|
353 | 353 | |
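# Editor's sketch, not part of the changeset: the header handshake read
# loop in before(), run against an in-memory stream instead of the real
# 'mtn automate stdio' pipe. The server announces 'format-version: 2',
# then any further 'key: value' headers, then a blank line. The payload
# below is hypothetical.
from cStringIO import StringIO

fp = StringIO('format-version: 2\npadding: yes\n\ncmddata')
read = fp.readline()
assert read == 'format-version: 2\n'
while read != '\n':              # consume headers up to the blank line
    read = fp.readline()
    assert read, 'truncated header block'
assert fp.read() == 'cmddata'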
|
354 | 354 | def after(self): |
|
355 | 355 | if self.automatestdio: |
|
356 | 356 | self.mtnwritefp.close() |
|
357 | 357 | self.mtnwritefp = None |
|
358 | 358 | self.mtnreadfp.close() |
|
359 | 359 | self.mtnreadfp = None |
|
360 | 360 |
@@ -1,1252 +1,1252 | |||
|
1 | 1 | # Subversion 1.4/1.5 Python API backend |
|
2 | 2 | # |
|
3 | 3 | # Copyright(C) 2007 Daniel Holth et al |
|
4 | 4 | |
|
5 | 5 | import os, re, sys, tempfile, urllib, urllib2, xml.dom.minidom |
|
6 | 6 | import cPickle as pickle |
|
7 | 7 | |
|
8 | 8 | from mercurial import strutil, scmutil, util, encoding |
|
9 | 9 | from mercurial.i18n import _ |
|
10 | 10 | |
|
11 | 11 | propertycache = util.propertycache |
|
12 | 12 | |
|
13 | 13 | # Subversion stuff. Works best with very recent Python SVN bindings |
|
14 | 14 | # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing |
|
15 | 15 | # these bindings. |
|
16 | 16 | |
|
17 | 17 | from cStringIO import StringIO |
|
18 | 18 | |
|
19 | 19 | from common import NoRepo, MissingTool, commit, encodeargs, decodeargs |
|
20 | 20 | from common import commandline, converter_source, converter_sink, mapfile |
|
21 | 21 | |
|
22 | 22 | try: |
|
23 | 23 | from svn.core import SubversionException, Pool |
|
24 | 24 | import svn |
|
25 | 25 | import svn.client |
|
26 | 26 | import svn.core |
|
27 | 27 | import svn.ra |
|
28 | 28 | import svn.delta |
|
29 | 29 | import transport |
|
30 | 30 | import warnings |
|
31 | 31 | warnings.filterwarnings('ignore', |
|
32 | 32 | module='svn.core', |
|
33 | 33 | category=DeprecationWarning) |
|
34 | 34 | |
|
35 | 35 | except ImportError: |
|
36 | 36 | svn = None |
|
37 | 37 | |
|
38 | 38 | class SvnPathNotFound(Exception): |
|
39 | 39 | pass |
|
40 | 40 | |
|
41 | 41 | def revsplit(rev): |
|
42 | 42 | """Parse a revision string and return (uuid, path, revnum).""" |
|
43 | 43 | url, revnum = rev.rsplit('@', 1) |
|
44 | 44 | parts = url.split('/', 1) |
|
45 | 45 | mod = '' |
|
46 | 46 | if len(parts) > 1: |
|
47 | 47 | mod = '/' + parts[1] |
|
48 | 48 | return parts[0][4:], mod, int(revnum) |
|
49 | 49 | |
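# Editor's note, not part of the changeset: revsplit() takes apart the
# 'svn:<uuid><module>@<revnum>' identifiers that revid() builds further
# down; parts[0][4:] strips the 'svn:' prefix. With a hypothetical
# identifier:
assert revsplit('svn:12345678-9abc/trunk@42') == \
       ('12345678-9abc', '/trunk', 42)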
|
50 | 50 | def quote(s): |
|
51 | 51 | # As of svn 1.7, many svn calls expect "canonical" paths. In |
|
52 | 52 | # theory, we should call svn.core.*canonicalize() on all paths |
|
53 | 53 | # before passing them to the API. Instead, we assume the base url |
|
54 | 54 | # is canonical and copy the behaviour of svn URL encoding function |
|
55 | 55 | # so we can extend it safely with new components. The "safe" |
|
56 | 56 | # characters were taken from the "svn_uri__char_validity" table in |
|
57 | 57 | # libsvn_subr/path.c. |
|
58 | 58 | return urllib.quote(s, "!$&'()*+,-./:=@_~") |
|
59 | 59 | |
|
60 | 60 | def geturl(path): |
|
61 | 61 | try: |
|
62 | 62 | return svn.client.url_from_path(svn.core.svn_path_canonicalize(path)) |
|
63 | 63 | except SubversionException: |
|
64 | 64 | # svn.client.url_from_path() fails with local repositories |
|
65 | 65 | pass |
|
66 | 66 | if os.path.isdir(path): |
|
67 | 67 | path = os.path.normpath(os.path.abspath(path)) |
|
68 | 68 | if os.name == 'nt': |
|
69 | 69 | path = '/' + util.normpath(path) |
|
70 | 70 | # Module URL is later compared with the repository URL returned |
|
71 | 71 | # by svn API, which is UTF-8. |
|
72 | 72 | path = encoding.tolocal(path) |
|
73 | 73 | path = 'file://%s' % quote(path) |
|
74 | 74 | return svn.core.svn_path_canonicalize(path) |
|
75 | 75 | |
|
76 | 76 | def optrev(number): |
|
77 | 77 | optrev = svn.core.svn_opt_revision_t() |
|
78 | 78 | optrev.kind = svn.core.svn_opt_revision_number |
|
79 | 79 | optrev.value.number = number |
|
80 | 80 | return optrev |
|
81 | 81 | |
|
82 | 82 | class changedpath(object): |
|
83 | 83 | def __init__(self, p): |
|
84 | 84 | self.copyfrom_path = p.copyfrom_path |
|
85 | 85 | self.copyfrom_rev = p.copyfrom_rev |
|
86 | 86 | self.action = p.action |
|
87 | 87 | |
|
88 | 88 | def get_log_child(fp, url, paths, start, end, limit=0, |
|
89 | 89 | discover_changed_paths=True, strict_node_history=False): |
|
90 | 90 | protocol = -1 |
|
91 | 91 | def receiver(orig_paths, revnum, author, date, message, pool): |
|
92 | 92 | if orig_paths is not None: |
|
93 | 93 | for k, v in orig_paths.iteritems(): |
|
94 | 94 | orig_paths[k] = changedpath(v) |
|
95 | 95 | pickle.dump((orig_paths, revnum, author, date, message), |
|
96 | 96 | fp, protocol) |
|
97 | 97 | |
|
98 | 98 | try: |
|
99 | 99 | # Use an ra of our own so that our parent can consume |
|
100 | 100 | # our results without confusing the server. |
|
101 | 101 | t = transport.SvnRaTransport(url=url) |
|
102 | 102 | svn.ra.get_log(t.ra, paths, start, end, limit, |
|
103 | 103 | discover_changed_paths, |
|
104 | 104 | strict_node_history, |
|
105 | 105 | receiver) |
|
106 | 106 | except IOError: |
|
107 | 107 | # Caller may interrupt the iteration |
|
108 | 108 | pickle.dump(None, fp, protocol) |
|
109 | 109 | except Exception, inst: |
|
110 | 110 | pickle.dump(str(inst), fp, protocol) |
|
111 | 111 | else: |
|
112 | 112 | pickle.dump(None, fp, protocol) |
|
113 | 113 | fp.close() |
|
114 | 114 | # With large history, cleanup process goes crazy and suddenly |
|
115 | 115 | # consumes *huge* amount of memory. The output file being closed, |
|
116 | 116 | # there is no need for clean termination. |
|
117 | 117 | os._exit(0) |
|
118 | 118 | |
|
119 | 119 | def debugsvnlog(ui, **opts): |
|
120 | 120 | """Fetch SVN log in a subprocess and channel them back to parent to |
|
121 | 121 | avoid memory collection issues. |
|
122 | 122 | """ |
|
123 | 123 | util.setbinary(sys.stdin) |
|
124 | 124 | util.setbinary(sys.stdout) |
|
125 | 125 | args = decodeargs(sys.stdin.read()) |
|
126 | 126 | get_log_child(sys.stdout, *args) |
|
127 | 127 | |
|
128 | 128 | class logstream(object): |
|
129 | 129 | """Interruptible revision log iterator.""" |
|
130 | 130 | def __init__(self, stdout): |
|
131 | 131 | self._stdout = stdout |
|
132 | 132 | |
|
133 | 133 | def __iter__(self): |
|
134 | 134 | while True: |
|
135 | 135 | try: |
|
136 | 136 | entry = pickle.load(self._stdout) |
|
137 | 137 | except EOFError: |
|
138 | 138 | raise util.Abort(_('Mercurial failed to run itself, check' |
|
139 | 139 | ' hg executable is in PATH')) |
|
140 | 140 | try: |
|
141 | 141 | orig_paths, revnum, author, date, message = entry |
|
142 | 142 | except: |
|
143 | 143 | if entry is None: |
|
144 | 144 | break |
|
145 | 145 | raise util.Abort(_("log stream exception '%s'") % entry) |
|
146 | 146 | yield entry |
|
147 | 147 | |
|
148 | 148 | def close(self): |
|
149 | 149 | if self._stdout: |
|
150 | 150 | self._stdout.close() |
|
151 | 151 | self._stdout = None |
|
152 | 152 | |
|
153 | 153 | |
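# Editor's sketch, not part of the changeset: the pickle framing that
# get_log_child() and logstream agree on. Each log entry is one pickled
# 5-tuple, a pickled None terminates the stream, and a pickled string
# reports a child-side error. Demonstrated in-process with hypothetical
# values instead of a real child pipe.
from cStringIO import StringIO
import cPickle as pickle

entry = ({}, 7, 'author', '2007-01-04T17:35:00.000000Z', 'msg')
buf = StringIO(pickle.dumps(entry, -1) + pickle.dumps(None, -1))

entries = []
while True:
    obj = pickle.load(buf)
    if not isinstance(obj, tuple):  # None ends the stream, str is an error
        break
    entries.append(obj)
assert entries == [entry]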
|
154 | 154 | # Check to see if the given path is a local Subversion repo. Verify this by |
|
155 | 155 | # looking for several svn-specific files and directories in the given |
|
156 | 156 | # directory. |
|
157 | 157 | def filecheck(ui, path, proto): |
|
158 | 158 | for x in ('locks', 'hooks', 'format', 'db'): |
|
159 | 159 | if not os.path.exists(os.path.join(path, x)): |
|
160 | 160 | return False |
|
161 | 161 | return True |
|
162 | 162 | |
|
163 | 163 | # Check to see if a given path is the root of an svn repo over http. We verify |
|
164 | 164 | # this by requesting a version-controlled URL we know can't exist and looking |
|
165 | 165 | # for the svn-specific "not found" XML. |
|
166 | 166 | def httpcheck(ui, path, proto): |
|
167 | 167 | try: |
|
168 | 168 | opener = urllib2.build_opener() |
|
169 | 169 | rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path)) |
|
170 | 170 | data = rsp.read() |
|
171 | 171 | except urllib2.HTTPError, inst: |
|
172 | 172 | if inst.code != 404: |
|
173 | 173 | # Except for 404 we cannot know for sure this is not an svn repo |
|
174 | 174 | ui.warn(_('svn: cannot probe remote repository, assume it could ' |
|
175 | 175 | 'be a subversion repository. Use --source-type if you ' |
|
176 | 176 | 'know better.\n')) |
|
177 | 177 | return True |
|
178 | 178 | data = inst.fp.read() |
|
179 | 179 | except: |
|
180 | 180 | # Could be urllib2.URLError if the URL is invalid or anything else. |
|
181 | 181 | return False |
|
182 | 182 | return '<m:human-readable errcode="160013">' in data |
|
183 | 183 | |
|
184 | 184 | protomap = {'http': httpcheck, |
|
185 | 185 | 'https': httpcheck, |
|
186 | 186 | 'file': filecheck, |
|
187 | 187 | } |
|
188 | 188 | def issvnurl(ui, url): |
|
189 | 189 | try: |
|
190 | 190 | proto, path = url.split('://', 1) |
|
191 | 191 | if proto == 'file': |
|
192 | 192 | path = urllib.url2pathname(path) |
|
193 | 193 | except ValueError: |
|
194 | 194 | proto = 'file' |
|
195 | 195 | path = os.path.abspath(url) |
|
196 | 196 | if proto == 'file': |
|
197 | 197 | path = util.pconvert(path) |
|
198 | 198 | check = protomap.get(proto, lambda *args: False) |
|
199 | 199 | while '/' in path: |
|
200 | 200 | if check(ui, path, proto): |
|
201 | 201 | return True |
|
202 | 202 | path = path.rsplit('/', 1)[0] |
|
203 | 203 | return False |
|
204 | 204 | |
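# Editor's note, not part of the changeset: the probe loop above walks
# the URL path right to left, so a URL deep inside a repository is still
# recognized once the repository root is reached. A pure-string model
# with a hypothetical path and a checker that only accepts the root:
def probe(path, isroot):
    while '/' in path:
        if isroot(path):
            return True
        path = path.rsplit('/', 1)[0]
    return False

assert probe('host/repo/trunk/dir', lambda p: p == 'host/repo')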
|
205 | 205 | # SVN conversion code stolen from bzr-svn and tailor |
|
206 | 206 | # |
|
207 | 207 | # Subversion looks like a versioned filesystem; branch structures |
|
208 | 208 | # are defined by conventions and not enforced by the tool. First, |
|
209 | 209 | # we define the potential branches (modules) as "trunk" and "branches" |
|
210 | 210 | # children directories. Revisions are then identified by their |
|
211 | 211 | # module and revision number (and a repository identifier). |
|
212 | 212 | # |
|
213 | 213 | # The revision graph is really a tree (or a forest). By default, a |
|
214 | 214 | # revision parent is the previous revision in the same module. If the |
|
215 | 215 | # module directory is copied/moved from another module then the |
|
216 | 216 | # revision is the module root and its parent the source revision in |
|
217 | 217 | # the parent module. A revision has at most one parent. |
|
218 | 218 | # |
|
219 | 219 | class svn_source(converter_source): |
|
220 | 220 | def __init__(self, ui, url, rev=None): |
|
221 | 221 | super(svn_source, self).__init__(ui, url, rev=rev) |
|
222 | 222 | |
|
223 | 223 | if not (url.startswith('svn://') or url.startswith('svn+ssh://') or |
|
224 | 224 | (os.path.exists(url) and |
|
225 | 225 | os.path.exists(os.path.join(url, '.svn'))) or |
|
226 | 226 | issvnurl(ui, url)): |
|
227 | 227 | raise NoRepo(_("%s does not look like a Subversion repository") |
|
228 | 228 | % url) |
|
229 | 229 | if svn is None: |
|
230 | 230 | raise MissingTool(_('Could not load Subversion python bindings')) |
|
231 | 231 | |
|
232 | 232 | try: |
|
233 | 233 | version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR |
|
234 | 234 | if version < (1, 4): |
|
235 | 235 | raise MissingTool(_('Subversion python bindings %d.%d found, ' |
|
236 | 236 | '1.4 or later required') % version) |
|
237 | 237 | except AttributeError: |
|
238 | 238 | raise MissingTool(_('Subversion python bindings are too old, 1.4 ' |
|
239 | 239 | 'or later required')) |
|
240 | 240 | |
|
241 | 241 | self.lastrevs = {} |
|
242 | 242 | |
|
243 | 243 | latest = None |
|
244 | 244 | try: |
|
245 | 245 | # Support file://path@rev syntax. Useful e.g. to convert |
|
246 | 246 | # deleted branches. |
|
247 | 247 | at = url.rfind('@') |
|
248 | 248 | if at >= 0: |
|
249 | 249 | latest = int(url[at + 1:]) |
|
250 | 250 | url = url[:at] |
|
251 | 251 | except ValueError: |
|
252 | 252 | pass |
|
253 | 253 | self.url = geturl(url) |
|
254 | 254 | self.encoding = 'UTF-8' # Subversion is always nominal UTF-8 |
|
255 | 255 | try: |
|
256 | 256 | self.transport = transport.SvnRaTransport(url=self.url) |
|
257 | 257 | self.ra = self.transport.ra |
|
258 | 258 | self.ctx = self.transport.client |
|
259 | 259 | self.baseurl = svn.ra.get_repos_root(self.ra) |
|
260 | 260 | # Module is either empty or a repository path starting with |
|
261 | 261 | # a slash and not ending with a slash. |
|
262 | 262 | self.module = urllib.unquote(self.url[len(self.baseurl):]) |
|
263 | 263 | self.prevmodule = None |
|
264 | 264 | self.rootmodule = self.module |
|
265 | 265 | self.commits = {} |
|
266 | 266 | self.paths = {} |
|
267 | 267 | self.uuid = svn.ra.get_uuid(self.ra) |
|
268 | 268 | except SubversionException: |
|
269 | 269 | ui.traceback() |
|
270 | 270 | raise NoRepo(_("%s does not look like a Subversion repository") |
|
271 | 271 | % self.url) |
|
272 | 272 | |
|
273 | 273 | if rev: |
|
274 | 274 | try: |
|
275 | 275 | latest = int(rev) |
|
276 | 276 | except ValueError: |
|
277 | 277 | raise util.Abort(_('svn: revision %s is not an integer') % rev) |
|
278 | 278 | |
|
279 | 279 | self.trunkname = self.ui.config('convert', 'svn.trunk', |
|
280 | 280 | 'trunk').strip('/') |
|
281 | 281 | self.startrev = self.ui.config('convert', 'svn.startrev', default=0) |
|
282 | 282 | try: |
|
283 | 283 | self.startrev = int(self.startrev) |
|
284 | 284 | if self.startrev < 0: |
|
285 | 285 | self.startrev = 0 |
|
286 | 286 | except ValueError: |
|
287 | 287 | raise util.Abort(_('svn: start revision %s is not an integer') |
|
288 | 288 | % self.startrev) |
|
289 | 289 | |
|
290 | 290 | try: |
|
291 | 291 | self.head = self.latest(self.module, latest) |
|
292 | 292 | except SvnPathNotFound: |
|
293 | 293 | self.head = None |
|
294 | 294 | if not self.head: |
|
295 | 295 | raise util.Abort(_('no revision found in module %s') |
|
296 | 296 | % self.module) |
|
297 | 297 | self.last_changed = self.revnum(self.head) |
|
298 | 298 | |
|
299 | 299 | self._changescache = None |
|
300 | 300 | |
|
301 | 301 | if os.path.exists(os.path.join(url, '.svn/entries')): |
|
302 | 302 | self.wc = url |
|
303 | 303 | else: |
|
304 | 304 | self.wc = None |
|
305 | 305 | self.convertfp = None |
|
306 | 306 | |
|
307 | 307 | def setrevmap(self, revmap): |
|
308 | 308 | lastrevs = {} |
|
309 | 309 | for revid in revmap.iterkeys(): |
|
310 | 310 | uuid, module, revnum = revsplit(revid) |
|
311 | 311 | lastrevnum = lastrevs.setdefault(module, revnum) |
|
312 | 312 | if revnum > lastrevnum: |
|
313 | 313 | lastrevs[module] = revnum |
|
314 | 314 | self.lastrevs = lastrevs |
|
315 | 315 | |
|
316 | 316 | def exists(self, path, optrev): |
|
317 | 317 | try: |
|
318 | 318 | svn.client.ls(self.url.rstrip('/') + '/' + quote(path), |
|
319 | 319 | optrev, False, self.ctx) |
|
320 | 320 | return True |
|
321 | 321 | except SubversionException: |
|
322 | 322 | return False |
|
323 | 323 | |
|
324 | 324 | def getheads(self): |
|
325 | 325 | |
|
326 | 326 | def isdir(path, revnum): |
|
327 | 327 | kind = self._checkpath(path, revnum) |
|
328 | 328 | return kind == svn.core.svn_node_dir |
|
329 | 329 | |
|
330 | 330 | def getcfgpath(name, rev): |
|
331 | 331 | cfgpath = self.ui.config('convert', 'svn.' + name) |
|
332 | 332 | if cfgpath is not None and cfgpath.strip() == '': |
|
333 | 333 | return None |
|
334 | 334 | path = (cfgpath or name).strip('/') |
|
335 | 335 | if not self.exists(path, rev): |
|
336 | 336 | if self.module.endswith(path) and name == 'trunk': |
|
337 | 337 | # we are converting from inside this directory |
|
338 | 338 | return None |
|
339 | 339 | if cfgpath: |
|
340 | 340 | raise util.Abort(_('expected %s to be at %r, but not found') |
|
341 | 341 | % (name, path)) |
|
342 | 342 | return None |
|
343 | 343 | self.ui.note(_('found %s at %r\n') % (name, path)) |
|
344 | 344 | return path |
|
345 | 345 | |
|
346 | 346 | rev = optrev(self.last_changed) |
|
347 | 347 | oldmodule = '' |
|
348 | 348 | trunk = getcfgpath('trunk', rev) |
|
349 | 349 | self.tags = getcfgpath('tags', rev) |
|
350 | 350 | branches = getcfgpath('branches', rev) |
|
351 | 351 | |
|
352 | 352 | # If the project has a trunk or branches, we will extract heads |
|
353 | 353 | # from them. We keep the project root otherwise. |
|
354 | 354 | if trunk: |
|
355 | 355 | oldmodule = self.module or '' |
|
356 | 356 | self.module += '/' + trunk |
|
357 | 357 | self.head = self.latest(self.module, self.last_changed) |
|
358 | 358 | if not self.head: |
|
359 | 359 | raise util.Abort(_('no revision found in module %s') |
|
360 | 360 | % self.module) |
|
361 | 361 | |
|
362 | 362 | # First head in the list is the module's head |
|
363 | 363 | self.heads = [self.head] |
|
364 | 364 | if self.tags is not None: |
|
365 | 365 | self.tags = '%s/%s' % (oldmodule, (self.tags or 'tags')) |
|
366 | 366 | |
|
367 | 367 | # Check if branches bring a few more heads to the list |
|
368 | 368 | if branches: |
|
369 | 369 | rpath = self.url.strip('/') |
|
370 | 370 | branchnames = svn.client.ls(rpath + '/' + quote(branches), |
|
371 | 371 | rev, False, self.ctx) |
|
372 | 372 | for branch in branchnames.keys(): |
|
373 | 373 | module = '%s/%s/%s' % (oldmodule, branches, branch) |
|
374 | 374 | if not isdir(module, self.last_changed): |
|
375 | 375 | continue |
|
376 | 376 | brevid = self.latest(module, self.last_changed) |
|
377 | 377 | if not brevid: |
|
378 | 378 | self.ui.note(_('ignoring empty branch %s\n') % branch) |
|
379 | 379 | continue |
|
380 | 380 | self.ui.note(_('found branch %s at %d\n') % |
|
381 | 381 | (branch, self.revnum(brevid))) |
|
382 | 382 | self.heads.append(brevid) |
|
383 | 383 | |
|
384 | 384 | if self.startrev and self.heads: |
|
385 | 385 | if len(self.heads) > 1: |
|
386 | 386 | raise util.Abort(_('svn: start revision is not supported ' |
|
387 | 387 | 'with more than one branch')) |
|
388 | 388 | revnum = self.revnum(self.heads[0]) |
|
389 | 389 | if revnum < self.startrev: |
|
390 | 390 | raise util.Abort( |
|
391 | 391 | _('svn: no revision found after start revision %d') |
|
392 | 392 | % self.startrev) |
|
393 | 393 | |
|
394 | 394 | return self.heads |
|
395 | 395 | |
|
396 | 396 | def getchanges(self, rev): |
|
397 | 397 | if self._changescache and self._changescache[0] == rev: |
|
398 | 398 | return self._changescache[1] |
|
399 | 399 | self._changescache = None |
|
400 | 400 | (paths, parents) = self.paths[rev] |
|
401 | 401 | if parents: |
|
402 | 402 | files, self.removed, copies = self.expandpaths(rev, paths, parents) |
|
403 | 403 | else: |
|
404 | 404 | # Perform a full checkout on roots |
|
405 | 405 | uuid, module, revnum = revsplit(rev) |
|
406 | 406 | entries = svn.client.ls(self.baseurl + quote(module), |
|
407 | 407 | optrev(revnum), True, self.ctx) |
|
408 | 408 | files = [n for n, e in entries.iteritems() |
|
409 | 409 | if e.kind == svn.core.svn_node_file] |
|
410 | 410 | copies = {} |
|
411 | 411 | self.removed = set() |
|
412 | 412 | |
|
413 | 413 | files.sort() |
|
414 | 414 | files = zip(files, [rev] * len(files)) |
|
415 | 415 | |
|
416 | 416 | # caller caches the result, so free it here to release memory |
|
417 | 417 | del self.paths[rev] |
|
418 | 418 | return (files, copies) |
|
419 | 419 | |
|
420 | 420 | def getchangedfiles(self, rev, i): |
|
421 | 421 | changes = self.getchanges(rev) |
|
422 | 422 | self._changescache = (rev, changes) |
|
423 | 423 | return [f[0] for f in changes[0]] |
|
424 | 424 | |
|
425 | 425 | def getcommit(self, rev): |
|
426 | 426 | if rev not in self.commits: |
|
427 | 427 | uuid, module, revnum = revsplit(rev) |
|
428 | 428 | self.module = module |
|
429 | 429 | self.reparent(module) |
|
430 | 430 | # We assume that: |
|
431 | 431 | # - requests for revisions after "stop" come from the |
|
432 | 432 | # revision graph backward traversal. Cache all of them |
|
433 | 433 | # down to stop, they will be used eventually. |
|
434 | 434 | # - requests for revisions before "stop" come to get |
|
435 | 435 | # isolated branches parents. Just fetch what is needed. |
|
436 | 436 | stop = self.lastrevs.get(module, 0) |
|
437 | 437 | if revnum < stop: |
|
438 | 438 | stop = revnum + 1 |
|
439 | 439 | self._fetch_revisions(revnum, stop) |
|
440 | 440 | if rev not in self.commits: |
|
441 | 441 | raise util.Abort(_('svn: revision %s not found') % revnum) |
|
442 | 442 | commit = self.commits[rev] |
|
443 | 443 | # caller caches the result, so free it here to release memory |
|
444 | 444 | del self.commits[rev] |
|
445 | 445 | return commit |
|
446 | 446 | |
|
447 | 447 | def gettags(self): |
|
448 | 448 | tags = {} |
|
449 | 449 | if self.tags is None: |
|
450 | 450 | return tags |
|
451 | 451 | |
|
452 | 452 | # svn tags are just a convention, project branches left in a |
|
453 | 453 | # 'tags' directory. There is no other relationship than |
|
454 | 454 | # ancestry, which is expensive to discover and makes them hard |
|
455 | 455 | # to update incrementally. Worse, past revisions may be |
|
456 | 456 | # referenced by tags far away in the future, requiring a deep |
|
457 | 457 | # history traversal on every calculation. Current code |
|
458 | 458 | # performs a single backward traversal, tracking moves within |
|
459 | 459 | # the tags directory (tag renaming) and recording a new tag |
|
460 | 460 | # everytime a project is copied from outside the tags |
|
461 | 461 | # directory. It also lists deleted tags, this behaviour may |
|
462 | 462 | # change in the future. |
|
463 | 463 | pendings = [] |
|
464 | 464 | tagspath = self.tags |
|
465 | 465 | start = svn.ra.get_latest_revnum(self.ra) |
|
466 | 466 | stream = self._getlog([self.tags], start, self.startrev) |
|
467 | 467 | try: |
|
468 | 468 | for entry in stream: |
|
469 | 469 | origpaths, revnum, author, date, message = entry |
|
470 | 470 | copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e |
|
471 | 471 | in origpaths.iteritems() if e.copyfrom_path] |
|
472 | 472 | # Apply moves/copies from more specific to general |
|
473 | 473 | copies.sort(reverse=True) |
|
474 | 474 | |
|
475 | 475 | srctagspath = tagspath |
|
476 | 476 | if copies and copies[-1][2] == tagspath: |
|
477 | 477 | # Track tags directory moves |
|
478 | 478 | srctagspath = copies.pop()[0] |
|
479 | 479 | |
|
480 | 480 | for source, sourcerev, dest in copies: |
|
481 | 481 | if not dest.startswith(tagspath + '/'): |
|
482 | 482 | continue |
|
483 | 483 | for tag in pendings: |
|
484 | 484 | if tag[0].startswith(dest): |
|
485 | 485 | tagpath = source + tag[0][len(dest):] |
|
486 | 486 | tag[:2] = [tagpath, sourcerev] |
|
487 | 487 | break |
|
488 | 488 | else: |
|
489 | 489 | pendings.append([source, sourcerev, dest]) |
|
490 | 490 | |
|
491 | 491 | # Filter out tags with children coming from different |
|
492 | 492 | # parts of the repository like: |
|
493 | 493 | # /tags/tag.1 (from /trunk:10) |
|
494 | 494 | # /tags/tag.1/foo (from /branches/foo:12) |
|
495 | 495 | # Here /tags/tag.1 is discarded, as well as its children. |
|
496 | 496 | # It happens with tools like cvs2svn. Such tags cannot |
|
497 | 497 | # be represented in mercurial. |
|
498 | 498 | addeds = dict((p, e.copyfrom_path) for p, e |
|
499 | 499 | in origpaths.iteritems() |
|
500 | 500 | if e.action == 'A' and e.copyfrom_path) |
|
501 | 501 | badroots = set() |
|
502 | 502 | for destroot in addeds: |
|
503 | 503 | for source, sourcerev, dest in pendings: |
|
504 | 504 | if (not dest.startswith(destroot + '/') |
|
505 | 505 | or source.startswith(addeds[destroot] + '/')): |
|
506 | 506 | continue |
|
507 | 507 | badroots.add(destroot) |
|
508 | 508 | break |
|
509 | 509 | |
|
510 | 510 | for badroot in badroots: |
|
511 | 511 | pendings = [p for p in pendings if p[2] != badroot |
|
512 | 512 | and not p[2].startswith(badroot + '/')] |
|
513 | 513 | |
|
514 | 514 | # Tell tag renamings from tag creations |
|
515 | 515 | renamings = [] |
|
516 | 516 | for source, sourcerev, dest in pendings: |
|
517 | 517 | tagname = dest.split('/')[-1] |
|
518 | 518 | if source.startswith(srctagspath): |
|
519 | 519 | renamings.append([source, sourcerev, tagname]) |
|
520 | 520 | continue |
|
521 | 521 | if tagname in tags: |
|
522 | 522 | # Keep the latest tag value |
|
523 | 523 | continue |
|
524 | 524 | # From revision may be fake, get one with changes |
|
525 | 525 | try: |
|
526 | 526 | tagid = self.latest(source, sourcerev) |
|
527 | 527 | if tagid and tagname not in tags: |
|
528 | 528 | tags[tagname] = tagid |
|
529 | 529 | except SvnPathNotFound: |
|
530 | 530 | # It happens when we are following directories |
|
531 | 531 | # we assumed were copied with their parents |
|
532 | 532 | # but were really created in the tag |
|
533 | 533 | # directory. |
|
534 | 534 | pass |
|
535 | 535 | pendings = renamings |
|
536 | 536 | tagspath = srctagspath |
|
537 | 537 | finally: |
|
538 | 538 | stream.close() |
|
539 | 539 | return tags |
|
540 | 540 | |
|
541 | 541 | def converted(self, rev, destrev): |
|
542 | 542 | if not self.wc: |
|
543 | 543 | return |
|
544 | 544 | if self.convertfp is None: |
|
545 | 545 | self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'), |
|
546 | 546 | 'a') |
|
547 | 547 | self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev))) |
|
548 | 548 | self.convertfp.flush() |
|
549 | 549 | |
|
550 | 550 | def revid(self, revnum, module=None): |
|
551 | 551 | return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum) |
|
552 | 552 | |
|
553 | 553 | def revnum(self, rev): |
|
554 | 554 | return int(rev.split('@')[-1]) |
|
555 | 555 | |
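# Editor's note, not part of the changeset: revid() and revnum() define
# the identifier format used throughout this source; revsplit() at the
# top of the file is their inverse. With hypothetical values:
rev = 'svn:%s%s@%s' % ('12345678-9abc', '/trunk', 42)
assert rev == 'svn:12345678-9abc/trunk@42'
assert int(rev.split('@')[-1]) == 42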
|
556 | 556 | def latest(self, path, stop=None): |
|
557 | 557 | """Find the latest revid affecting path, up to stop revision |
|
558 | 558 | number. If stop is None, default to repository latest |
|
559 | 559 | revision. It may return a revision in a different module, |
|
560 | 560 | since a branch may be moved without a change being |
|
561 | 561 | reported. Return None if computed module does not belong to |
|
562 | 562 | rootmodule subtree. |
|
563 | 563 | """ |
|
564 | 564 | def findchanges(path, start, stop=None): |
|
565 | 565 | stream = self._getlog([path], start, stop or 1) |
|
566 | 566 | try: |
|
567 | 567 | for entry in stream: |
|
568 | 568 | paths, revnum, author, date, message = entry |
|
569 | 569 | if stop is None and paths: |
|
570 | 570 | # We do not know the latest changed revision, |
|
571 | 571 | # keep the first one with changed paths. |
|
572 | 572 | break |
|
573 | 573 | if revnum <= stop: |
|
574 | 574 | break |
|
575 | 575 | |
|
576 | 576 | for p in paths: |
|
577 | 577 | if (not path.startswith(p) or |
|
578 | 578 | not paths[p].copyfrom_path): |
|
579 | 579 | continue |
|
580 | 580 | newpath = paths[p].copyfrom_path + path[len(p):] |
|
581 | 581 | self.ui.debug("branch renamed from %s to %s at %d\n" % |
|
582 | 582 | (path, newpath, revnum)) |
|
583 | 583 | path = newpath |
|
584 | 584 | break |
|
585 | 585 | if not paths: |
|
586 | 586 | revnum = None |
|
587 | 587 | return revnum, path |
|
588 | 588 | finally: |
|
589 | 589 | stream.close() |
|
590 | 590 | |
|
591 | 591 | if not path.startswith(self.rootmodule): |
|
592 | 592 | # Requests on foreign branches may be forbidden at server level |
|
593 | 593 | self.ui.debug('ignoring foreign branch %r\n' % path) |
|
594 | 594 | return None |
|
595 | 595 | |
|
596 | 596 | if stop is None: |
|
597 | 597 | stop = svn.ra.get_latest_revnum(self.ra) |
|
598 | 598 | try: |
|
599 | 599 | prevmodule = self.reparent('') |
|
600 | 600 | dirent = svn.ra.stat(self.ra, path.strip('/'), stop) |
|
601 | 601 | self.reparent(prevmodule) |
|
602 | 602 | except SubversionException: |
|
603 | 603 | dirent = None |
|
604 | 604 | if not dirent: |
|
605 | 605 | raise SvnPathNotFound(_('%s not found up to revision %d') |
|
606 | 606 | % (path, stop)) |
|
607 | 607 | |
|
608 | 608 | # stat() gives us the previous revision on this line of |
|
609 | 609 | # development, but it might be in *another module*. Fetch the |
|
610 | 610 | # log and detect renames down to the latest revision. |
|
611 | 611 | revnum, realpath = findchanges(path, stop, dirent.created_rev) |
|
612 | 612 | if revnum is None: |
|
613 | 613 | # Tools like svnsync can create empty revisions when |

614 | 614 | # synchronizing only a subtree, for instance. These empty |
|
615 | 615 | # revisions created_rev still have their original values |
|
616 | 616 | # despite all changes having disappeared and can be |
|
617 | 617 | # returned by ra.stat(), at least when stating the root |
|
618 | 618 | # module. In that case, do not trust created_rev and scan |
|
619 | 619 | # the whole history. |
|
620 | 620 | revnum, realpath = findchanges(path, stop) |
|
621 | 621 | if revnum is None: |
|
622 | 622 | self.ui.debug('ignoring empty branch %r\n' % realpath) |
|
623 | 623 | return None |
|
624 | 624 | |
|
625 | 625 | if not realpath.startswith(self.rootmodule): |
|
626 | 626 | self.ui.debug('ignoring foreign branch %r\n' % realpath) |
|
627 | 627 | return None |
|
628 | 628 | return self.revid(revnum, realpath) |
|
629 | 629 | |
|
630 | 630 | def reparent(self, module): |
|
631 | 631 | """Reparent the svn transport and return the previous parent.""" |
|
632 | 632 | if self.prevmodule == module: |
|
633 | 633 | return module |
|
634 | 634 | svnurl = self.baseurl + quote(module) |
|
635 | 635 | prevmodule = self.prevmodule |
|
636 | 636 | if prevmodule is None: |
|
637 | 637 | prevmodule = '' |
|
638 | 638 | self.ui.debug("reparent to %s\n" % svnurl) |
|
639 | 639 | svn.ra.reparent(self.ra, svnurl) |
|
640 | 640 | self.prevmodule = module |
|
641 | 641 | return prevmodule |
|
642 | 642 | |
|
643 | 643 | def expandpaths(self, rev, paths, parents): |
|
644 | 644 | changed, removed = set(), set() |
|
645 | 645 | copies = {} |
|
646 | 646 | |
|
647 | 647 | new_module, revnum = revsplit(rev)[1:] |
|
648 | 648 | if new_module != self.module: |
|
649 | 649 | self.module = new_module |
|
650 | 650 | self.reparent(self.module) |
|
651 | 651 | |
|
652 | 652 | for i, (path, ent) in enumerate(paths): |
|
653 | 653 | self.ui.progress(_('scanning paths'), i, item=path, |
|
654 | 654 | total=len(paths)) |
|
655 | 655 | entrypath = self.getrelpath(path) |
|
656 | 656 | |
|
657 | 657 | kind = self._checkpath(entrypath, revnum) |
|
658 | 658 | if kind == svn.core.svn_node_file: |
|
659 | 659 | changed.add(self.recode(entrypath)) |
|
660 | 660 | if not ent.copyfrom_path or not parents: |
|
661 | 661 | continue |
|
662 | 662 | # Copy sources not in parent revisions cannot be |
|
663 | 663 | # represented, ignore their origin for now |
|
664 | 664 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
665 | 665 | if ent.copyfrom_rev < prevnum: |
|
666 | 666 | continue |
|
667 | 667 | copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule) |
|
668 | 668 | if not copyfrom_path: |
|
669 | 669 | continue |
|
670 | 670 | self.ui.debug("copied to %s from %s@%s\n" % |
|
671 | 671 | (entrypath, copyfrom_path, ent.copyfrom_rev)) |
|
672 | 672 | copies[self.recode(entrypath)] = self.recode(copyfrom_path) |
|
673 | 673 | elif kind == 0: # gone, but had better be a deleted *file* |
|
674 | 674 | self.ui.debug("gone from %s\n" % ent.copyfrom_rev) |
|
675 | 675 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
676 | 676 | parentpath = pmodule + "/" + entrypath |
|
677 | 677 | fromkind = self._checkpath(entrypath, prevnum, pmodule) |
|
678 | 678 | |
|
679 | 679 | if fromkind == svn.core.svn_node_file: |
|
680 | 680 | removed.add(self.recode(entrypath)) |
|
681 | 681 | elif fromkind == svn.core.svn_node_dir: |
|
682 | 682 | oroot = parentpath.strip('/') |
|
683 | 683 | nroot = path.strip('/') |
|
684 | 684 | children = self._iterfiles(oroot, prevnum) |
|
685 | 685 | for childpath in children: |
|
686 | 686 | childpath = childpath.replace(oroot, nroot) |
|
687 | 687 | childpath = self.getrelpath("/" + childpath, pmodule) |
|
688 | 688 | if childpath: |
|
689 | 689 | removed.add(self.recode(childpath)) |
|
690 | 690 | else: |
|
691 | 691 | self.ui.debug('unknown path in revision %d: %s\n' % \ |
|
692 | 692 | (revnum, path)) |
|
693 | 693 | elif kind == svn.core.svn_node_dir: |
|
694 | 694 | if ent.action == 'M': |
|
695 | 695 | # If the directory just had a prop change, |
|
696 | 696 | # then we shouldn't need to look for its children. |
|
697 | 697 | continue |
|
698 | 698 | if ent.action == 'R' and parents: |
|
699 | 699 | # If a directory is replacing a file, mark the previous |
|
700 | 700 | # file as deleted |
|
701 | 701 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
702 | 702 | pkind = self._checkpath(entrypath, prevnum, pmodule) |
|
703 | 703 | if pkind == svn.core.svn_node_file: |
|
704 | 704 | removed.add(self.recode(entrypath)) |
|
705 | 705 | elif pkind == svn.core.svn_node_dir: |
|
706 | 706 | # We do not know what files were kept or removed, |
|
707 | 707 | # mark them all as changed. |
|
708 | 708 | for childpath in self._iterfiles(pmodule, prevnum): |
|
709 | 709 | childpath = self.getrelpath("/" + childpath) |
|
710 | 710 | if childpath: |
|
711 | 711 | changed.add(self.recode(childpath)) |
|
712 | 712 | |
|
713 | 713 | for childpath in self._iterfiles(path, revnum): |
|
714 | 714 | childpath = self.getrelpath("/" + childpath) |
|
715 | 715 | if childpath: |
|
716 | 716 | changed.add(self.recode(childpath)) |
|
717 | 717 | |
|
718 | 718 | # Handle directory copies |
|
719 | 719 | if not ent.copyfrom_path or not parents: |
|
720 | 720 | continue |
|
721 | 721 | # Copy sources not in parent revisions cannot be |
|
722 | 722 | # represented, ignore their origin for now |
|
723 | 723 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
724 | 724 | if ent.copyfrom_rev < prevnum: |
|
725 | 725 | continue |
|
726 | 726 | copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule) |
|
727 | 727 | if not copyfrompath: |
|
728 | 728 | continue |
|
729 | 729 | self.ui.debug("mark %s came from %s:%d\n" |
|
730 | 730 | % (path, copyfrompath, ent.copyfrom_rev)) |
|
731 | 731 | children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev) |
|
732 | 732 | for childpath in children: |
|
733 | 733 | childpath = self.getrelpath("/" + childpath, pmodule) |
|
734 | 734 | if not childpath: |
|
735 | 735 | continue |
|
736 | 736 | copytopath = path + childpath[len(copyfrompath):] |
|
737 | 737 | copytopath = self.getrelpath(copytopath) |
|
738 | 738 | copies[self.recode(copytopath)] = self.recode(childpath) |
|
739 | 739 | |
|
740 | 740 | self.ui.progress(_('scanning paths'), None) |
|
741 | 741 | changed.update(removed) |
|
742 | 742 | return (list(changed), removed, copies) |
|
743 | 743 | |
|
744 | 744 | def _fetch_revisions(self, from_revnum, to_revnum): |
|
745 | 745 | if from_revnum < to_revnum: |
|
746 | 746 | from_revnum, to_revnum = to_revnum, from_revnum |
|
747 | 747 | |
|
748 | 748 | self.child_cset = None |
|
749 | 749 | |
|
750 | 750 | def parselogentry(orig_paths, revnum, author, date, message): |
|
751 | 751 | """Return the parsed commit object or None, and True if |
|
752 | 752 | the revision is a branch root. |
|
753 | 753 | """ |
|
754 | 754 | self.ui.debug("parsing revision %d (%d changes)\n" % |
|
755 | 755 | (revnum, len(orig_paths))) |
|
756 | 756 | |
|
757 | 757 | branched = False |
|
758 | 758 | rev = self.revid(revnum) |
|
759 | 759 | # branch log might return entries for a parent we already have |
|
760 | 760 | |
|
761 | 761 | if rev in self.commits or revnum < to_revnum: |
|
762 | 762 | return None, branched |
|
763 | 763 | |
|
764 | 764 | parents = [] |
|
765 | 765 | # check whether this revision is the start of a branch or part |
|
766 | 766 | # of a branch renaming |
|
767 | 767 | orig_paths = sorted(orig_paths.iteritems()) |
|
768 | 768 | root_paths = [(p, e) for p, e in orig_paths |
|
769 | 769 | if self.module.startswith(p)] |
|
770 | 770 | if root_paths: |
|
771 | 771 | path, ent = root_paths[-1] |
|
772 | 772 | if ent.copyfrom_path: |
|
773 | 773 | branched = True |
|
774 | 774 | newpath = ent.copyfrom_path + self.module[len(path):] |
|
775 | 775 | # ent.copyfrom_rev may not be the actual last revision |
|
776 | 776 | previd = self.latest(newpath, ent.copyfrom_rev) |
|
777 | 777 | if previd is not None: |
|
778 | 778 | prevmodule, prevnum = revsplit(previd)[1:] |
|
779 | 779 | if prevnum >= self.startrev: |
|
780 | 780 | parents = [previd] |
|
781 | 781 | self.ui.note( |
|
782 | 782 | _('found parent of branch %s at %d: %s\n') % |
|
783 | 783 | (self.module, prevnum, prevmodule)) |
|
784 | 784 | else: |
|
785 | 785 | self.ui.debug("no copyfrom path, don't know what to do.\n") |
|
786 | 786 | |
|
787 | 787 | paths = [] |
|
788 | 788 | # filter out unrelated paths |
|
789 | 789 | for path, ent in orig_paths: |
|
790 | 790 | if self.getrelpath(path) is None: |
|
791 | 791 | continue |
|
792 | 792 | paths.append((path, ent)) |
|
793 | 793 | |
|
794 | 794 | # Example SVN datetime. Includes microseconds. |
|
795 | 795 | # ISO-8601 conformant |
|
796 | 796 | # '2007-01-04T17:35:00.902377Z' |
|
797 | 797 | date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"]) |
|
798 | 798 | |
|
799 | 799 | log = message and self.recode(message) or '' |
|
800 | 800 | author = author and self.recode(author) or '' |
|
801 | 801 | try: |
|
802 | 802 | branch = self.module.split("/")[-1] |
|
803 | 803 | if branch == self.trunkname: |
|
804 | 804 | branch = None |
|
805 | 805 | except IndexError: |
|
806 | 806 | branch = None |
|
807 | 807 | |
|
808 | 808 | cset = commit(author=author, |
|
809 | 809 | date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), |
|
810 | 810 | desc=log, |
|
811 | 811 | parents=parents, |
|
812 | 812 | branch=branch, |
|
813 | 813 | rev=rev) |
|
814 | 814 | |
|
815 | 815 | self.commits[rev] = cset |
|
816 | 816 | # The parents list is *shared* among self.paths and the |
|
817 | 817 | # commit object. Both will be updated below. |
|
818 | 818 | self.paths[rev] = (paths, cset.parents) |
|
819 | 819 | if self.child_cset and not self.child_cset.parents: |
|
820 | 820 | self.child_cset.parents[:] = [rev] |
|
821 | 821 | self.child_cset = cset |
|
822 | 822 | return cset, branched |
|
823 | 823 | |
|
824 | 824 | self.ui.note(_('fetching revision log for "%s" from %d to %d\n') % |
|
825 | 825 | (self.module, from_revnum, to_revnum)) |
|
826 | 826 | |
|
827 | 827 | try: |
|
828 | 828 | firstcset = None |
|
829 | 829 | lastonbranch = False |
|
830 | 830 | stream = self._getlog([self.module], from_revnum, to_revnum) |
|
831 | 831 | try: |
|
832 | 832 | for entry in stream: |
|
833 | 833 | paths, revnum, author, date, message = entry |
|
834 | 834 | if revnum < self.startrev: |
|
835 | 835 | lastonbranch = True |
|
836 | 836 | break |
|
837 | 837 | if not paths: |
|
838 | 838 | self.ui.debug('revision %d has no entries\n' % revnum) |
|
839 | 839 | # If we ever leave the loop on an empty |
|
840 | 840 | # revision, do not try to get a parent branch |
|
841 | 841 | lastonbranch = lastonbranch or revnum == 0 |
|
842 | 842 | continue |
|
843 | 843 | cset, lastonbranch = parselogentry(paths, revnum, author, |
|
844 | 844 | date, message) |
|
845 | 845 | if cset: |
|
846 | 846 | firstcset = cset |
|
847 | 847 | if lastonbranch: |
|
848 | 848 | break |
|
849 | 849 | finally: |
|
850 | 850 | stream.close() |
|
851 | 851 | |
|
852 | 852 | if not lastonbranch and firstcset and not firstcset.parents: |
|
853 | 853 | # The first revision of the sequence (the last fetched one) |
|
854 | 854 | # has invalid parents if not a branch root. Find the parent |
|
855 | 855 | # revision now, if any. |
|
856 | 856 | try: |
|
857 | 857 | firstrevnum = self.revnum(firstcset.rev) |
|
858 | 858 | if firstrevnum > 1: |
|
859 | 859 | latest = self.latest(self.module, firstrevnum - 1) |
|
860 | 860 | if latest: |
|
861 | 861 | firstcset.parents.append(latest) |
|
862 | 862 | except SvnPathNotFound: |
|
863 | 863 | pass |
|
864 | 864 | except SubversionException, (inst, num): |
|
865 | 865 | if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION: |
|
866 | 866 | raise util.Abort(_('svn: branch has no revision %s') |
|
867 | 867 | % to_revnum) |
|
868 | 868 | raise |
|
869 | 869 | |
|
870 | 870 | def getfile(self, file, rev): |
|
871 | 871 | # TODO: ra.get_file transmits the whole file instead of diffs. |
|
872 | 872 | if file in self.removed: |
|
873 | | raise IOError |

| 873 | raise IOError |
|
874 | 874 | mode = '' |
|
875 | 875 | try: |
|
876 | 876 | new_module, revnum = revsplit(rev)[1:] |
|
877 | 877 | if self.module != new_module: |
|
878 | 878 | self.module = new_module |
|
879 | 879 | self.reparent(self.module) |
|
880 | 880 | io = StringIO() |
|
881 | 881 | info = svn.ra.get_file(self.ra, file, revnum, io) |
|
882 | 882 | data = io.getvalue() |
|
883 | 883 | # ra.get_file() seems to keep a reference on the input buffer, |

884 | 884 | # preventing collection. Release it explicitly. |
|
885 | 885 | io.close() |
|
886 | 886 | if isinstance(info, list): |
|
887 | 887 | info = info[-1] |
|
888 | 888 | mode = ("svn:executable" in info) and 'x' or '' |
|
889 | 889 | mode = ("svn:special" in info) and 'l' or mode |
|
890 | 890 | except SubversionException, e: |
|
891 | 891 | notfound = (svn.core.SVN_ERR_FS_NOT_FOUND, |
|
892 | 892 | svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND) |
|
893 | 893 | if e.apr_err in notfound: # File not found |
|
894 | | raise IOError |

| 894 | raise IOError |
|
895 | 895 | raise |
|
896 | 896 | if mode == 'l': |
|
897 | 897 | link_prefix = "link " |
|
898 | 898 | if data.startswith(link_prefix): |
|
899 | 899 | data = data[len(link_prefix):] |
|
900 | 900 | return data, mode |
|
901 | 901 | |
|
902 | 902 | def _iterfiles(self, path, revnum): |
|
903 | 903 | """Enumerate all files in path at revnum, recursively.""" |
|
904 | 904 | path = path.strip('/') |
|
905 | 905 | pool = Pool() |
|
906 | 906 | rpath = '/'.join([self.baseurl, quote(path)]).strip('/') |
|
907 | 907 | entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool) |
|
908 | 908 | if path: |
|
909 | 909 | path += '/' |
|
910 | 910 | return ((path + p) for p, e in entries.iteritems() |
|
911 | 911 | if e.kind == svn.core.svn_node_file) |
|
912 | 912 | |
|
913 | 913 | def getrelpath(self, path, module=None): |
|
914 | 914 | if module is None: |
|
915 | 915 | module = self.module |
|
916 | 916 | # Given the repository url of this wc, say |
|
917 | 917 | # "http://server/plone/CMFPlone/branches/Plone-2_0-branch" |
|
918 | 918 | # extract the "entry" portion (a relative path) from what |
|
919 | 919 | # svn log --xml says, ie |
|
920 | 920 | # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py" |
|
921 | 921 | # that is to say "tests/PloneTestCase.py" |
|
922 | 922 | if path.startswith(module): |
|
923 | 923 | relative = path.rstrip('/')[len(module):] |
|
924 | 924 | if relative.startswith('/'): |
|
925 | 925 | return relative[1:] |
|
926 | 926 | elif relative == '': |
|
927 | 927 | return relative |
|
928 | 928 | |
|
929 | 929 | # The path is outside our tracked tree... |
|
930 | 930 | self.ui.debug('%r is not under %r, ignoring\n' % (path, module)) |
|
931 | 931 | return None |
|
932 | 932 | |
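# Editor's note, not part of the changeset: the module-relative
# extraction in getrelpath(), applied to the example from the comment
# above:
module = '/CMFPlone/branches/Plone-2_0-branch'
path = '/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py'
assert path.rstrip('/')[len(module):][1:] == 'tests/PloneTestCase.py'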
|
933 | 933 | def _checkpath(self, path, revnum, module=None): |
|
934 | 934 | if module is not None: |
|
935 | 935 | prevmodule = self.reparent('') |
|
936 | 936 | path = module + '/' + path |
|
937 | 937 | try: |
|
938 | 938 | # ra.check_path does not like leading slashes very much, it leads |
|
939 | 939 | # to PROPFIND subversion errors |
|
940 | 940 | return svn.ra.check_path(self.ra, path.strip('/'), revnum) |
|
941 | 941 | finally: |
|
942 | 942 | if module is not None: |
|
943 | 943 | self.reparent(prevmodule) |
|
944 | 944 | |
|
945 | 945 | def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True, |
|
946 | 946 | strict_node_history=False): |
|
947 | 947 | # Normalize path names, svn >= 1.5 only wants paths relative to |
|
948 | 948 | # supplied URL |
|
949 | 949 | relpaths = [] |
|
950 | 950 | for p in paths: |
|
951 | 951 | if not p.startswith('/'): |
|
952 | 952 | p = self.module + '/' + p |
|
953 | 953 | relpaths.append(p.strip('/')) |
|
954 | 954 | args = [self.baseurl, relpaths, start, end, limit, |
|
955 | 955 | discover_changed_paths, strict_node_history] |
|
956 | 956 | arg = encodeargs(args) |
|
957 | 957 | hgexe = util.hgexecutable() |
|
958 | 958 | cmd = '%s debugsvnlog' % util.shellquote(hgexe) |
|
959 | 959 | stdin, stdout = util.popen2(util.quotecommand(cmd)) |
|
960 | 960 | stdin.write(arg) |
|
961 | 961 | try: |
|
962 | 962 | stdin.close() |
|
963 | 963 | except IOError: |
|
964 | 964 | raise util.Abort(_('Mercurial failed to run itself, check' |
|
965 | 965 | ' hg executable is in PATH')) |
|
966 | 966 | return logstream(stdout) |
|
967 | 967 | |
|
968 | 968 | pre_revprop_change = '''#!/bin/sh |
|
969 | 969 | |
|
970 | 970 | REPOS="$1" |
|
971 | 971 | REV="$2" |
|
972 | 972 | USER="$3" |
|
973 | 973 | PROPNAME="$4" |
|
974 | 974 | ACTION="$5" |
|
975 | 975 | |
|
976 | 976 | if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi |
|
977 | 977 | if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi |
|
978 | 978 | if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi |
|
979 | 979 | |
|
980 | 980 | echo "Changing prohibited revision property" >&2 |
|
981 | 981 | exit 1 |
|
982 | 982 | ''' |
|
983 | 983 | |
|
984 | 984 | class svn_sink(converter_sink, commandline): |
|
985 | 985 | commit_re = re.compile(r'Committed revision (\d+).', re.M) |
|
986 | 986 | uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M) |
|
987 | 987 | |
|
988 | 988 | def prerun(self): |
|
989 | 989 | if self.wc: |
|
990 | 990 | os.chdir(self.wc) |
|
991 | 991 | |
|
992 | 992 | def postrun(self): |
|
993 | 993 | if self.wc: |
|
994 | 994 | os.chdir(self.cwd) |
|
995 | 995 | |
|
996 | 996 | def join(self, name): |
|
997 | 997 | return os.path.join(self.wc, '.svn', name) |
|
998 | 998 | |
|
999 | 999 | def revmapfile(self): |
|
1000 | 1000 | return self.join('hg-shamap') |
|
1001 | 1001 | |
|
1002 | 1002 | def authorfile(self): |
|
1003 | 1003 | return self.join('hg-authormap') |
|
1004 | 1004 | |
|
1005 | 1005 | def __init__(self, ui, path): |
|
1006 | 1006 | |
|
1007 | 1007 | converter_sink.__init__(self, ui, path) |
|
1008 | 1008 | commandline.__init__(self, ui, 'svn') |
|
1009 | 1009 | self.delete = [] |
|
1010 | 1010 | self.setexec = [] |
|
1011 | 1011 | self.delexec = [] |
|
1012 | 1012 | self.copies = [] |
|
1013 | 1013 | self.wc = None |
|
1014 | 1014 | self.cwd = os.getcwd() |
|
1015 | 1015 | |
|
1016 | 1016 | path = os.path.realpath(path) |
|
1017 | 1017 | |
|
1018 | 1018 | created = False |
|
1019 | 1019 | if os.path.isfile(os.path.join(path, '.svn', 'entries')): |
|
1020 | 1020 | self.wc = path |
|
1021 | 1021 | self.run0('update') |
|
1022 | 1022 | else: |
|
1023 | 1023 | wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc') |
|
1024 | 1024 | |
|
1025 | 1025 | if os.path.isdir(os.path.dirname(path)): |
|
1026 | 1026 | if not os.path.exists(os.path.join(path, 'db', 'fs-type')): |
|
1027 | 1027 | ui.status(_('initializing svn repository %r\n') % |
|
1028 | 1028 | os.path.basename(path)) |
|
1029 | 1029 | commandline(ui, 'svnadmin').run0('create', path) |
|
1030 | 1030 | created = path |
|
1031 | 1031 | path = util.normpath(path) |
|
1032 | 1032 | if not path.startswith('/'): |
|
1033 | 1033 | path = '/' + path |
|
1034 | 1034 | path = 'file://' + path |
|
1035 | 1035 | |
|
1036 | 1036 | ui.status(_('initializing svn working copy %r\n') |
|
1037 | 1037 | % os.path.basename(wcpath)) |
|
1038 | 1038 | self.run0('checkout', path, wcpath) |
|
1039 | 1039 | |
|
1040 | 1040 | self.wc = wcpath |
|
1041 | 1041 | self.opener = scmutil.opener(self.wc) |
|
1042 | 1042 | self.wopener = scmutil.opener(self.wc) |
|
1043 | 1043 | self.childmap = mapfile(ui, self.join('hg-childmap')) |
|
1044 | 1044 | self.is_exec = util.checkexec(self.wc) and util.isexec or None |
|
1045 | 1045 | |
|
1046 | 1046 | if created: |
|
1047 | 1047 | hook = os.path.join(created, 'hooks', 'pre-revprop-change') |
|
1048 | 1048 | fp = open(hook, 'w') |
|
1049 | 1049 | fp.write(pre_revprop_change) |
|
1050 | 1050 | fp.close() |
|
1051 | 1051 | util.setflags(hook, False, True) |
|
1052 | 1052 | |
|
1053 | 1053 | output = self.run0('info') |
|
1054 | 1054 | self.uuid = self.uuid_re.search(output).group(1).strip() |
|
1055 | 1055 | |
|
1056 | 1056 | def wjoin(self, *names): |
|
1057 | 1057 | return os.path.join(self.wc, *names) |
|
1058 | 1058 | |
|
1059 | 1059 | @propertycache |
|
1060 | 1060 | def manifest(self): |
|
1061 | 1061 | # As of svn 1.7, the "add" command fails when receiving |
|
1062 | 1062 | # already tracked entries, so we have to track and filter them |
|
1063 | 1063 | # ourselves. |
|
1064 | 1064 | m = set() |
|
1065 | 1065 | output = self.run0('ls', recursive=True, xml=True) |
|
1066 | 1066 | doc = xml.dom.minidom.parseString(output) |
|
1067 | 1067 | for e in doc.getElementsByTagName('entry'): |
|
1068 | 1068 | for n in e.childNodes: |
|
1069 | 1069 | if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name': |
|
1070 | 1070 | continue |
|
1071 | 1071 | name = ''.join(c.data for c in n.childNodes |
|
1072 | 1072 | if c.nodeType == c.TEXT_NODE) |
|
1073 | 1073 | # Entries are compared with names coming from |
|
1074 | 1074 | # mercurial, so bytes with undefined encoding. Our |
|
1075 | 1075 | # best bet is to assume they are in local |
|
1076 | 1076 | # encoding. They will be passed to command line calls |
|
1077 | 1077 | # later anyway, so they better be. |
|
1078 | 1078 | m.add(encoding.tolocal(name.encode('utf-8'))) |
|
1079 | 1079 | break |
|
1080 | 1080 | return m |
|
1081 | 1081 | |
|
1082 | 1082 | def putfile(self, filename, flags, data): |
|
1083 | 1083 | if 'l' in flags: |
|
1084 | 1084 | self.wopener.symlink(data, filename) |
|
1085 | 1085 | else: |
|
1086 | 1086 | try: |
|
1087 | 1087 | if os.path.islink(self.wjoin(filename)): |
|
1088 | 1088 | os.unlink(filename) |
|
1089 | 1089 | except OSError: |
|
1090 | 1090 | pass |
|
1091 | 1091 | self.wopener.write(filename, data) |
|
1092 | 1092 | |
|
1093 | 1093 | if self.is_exec: |
|
1094 | 1094 | was_exec = self.is_exec(self.wjoin(filename)) |
|
1095 | 1095 | else: |
|
1096 | 1096 | # On filesystems not supporting execute-bit, there is no way |
|
1097 | 1097 | # to know if it is set other than asking subversion. Setting it |
|
1098 | 1098 | # systematically is just as expensive and much simpler. |
|
1099 | 1099 | was_exec = 'x' not in flags |
|
1100 | 1100 | |
|
1101 | 1101 | util.setflags(self.wjoin(filename), False, 'x' in flags) |
|
1102 | 1102 | if was_exec: |
|
1103 | 1103 | if 'x' not in flags: |
|
1104 | 1104 | self.delexec.append(filename) |
|
1105 | 1105 | else: |
|
1106 | 1106 | if 'x' in flags: |
|
1107 | 1107 | self.setexec.append(filename) |
|
1108 | 1108 | |
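# Editor's note, not part of the changeset: on filesystems that cannot
# report the execute bit, 'was_exec' above is deliberately set to the
# opposite of the desired state, so the svn:executable property is
# always queued for (re)setting; per the comment above, querying
# subversion for the current value would cost as much as setting it.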
|
1109 | 1109 | def _copyfile(self, source, dest): |
|
1110 | 1110 | # SVN's copy command pukes if the destination file exists, but |
|
1111 | 1111 | # our copyfile method expects to record a copy that has |
|
1112 | 1112 | # already occurred. Cross the semantic gap. |
|
1113 | 1113 | wdest = self.wjoin(dest) |
|
1114 | 1114 | exists = os.path.lexists(wdest) |
|
1115 | 1115 | if exists: |
|
1116 | 1116 | fd, tempname = tempfile.mkstemp( |
|
1117 | 1117 | prefix='hg-copy-', dir=os.path.dirname(wdest)) |
|
1118 | 1118 | os.close(fd) |
|
1119 | 1119 | os.unlink(tempname) |
|
1120 | 1120 | os.rename(wdest, tempname) |
|
1121 | 1121 | try: |
|
1122 | 1122 | self.run0('copy', source, dest) |
|
1123 | 1123 | finally: |
|
1124 | 1124 | self.manifest.add(dest) |
|
1125 | 1125 | if exists: |
|
1126 | 1126 | try: |
|
1127 | 1127 | os.unlink(wdest) |
|
1128 | 1128 | except OSError: |
|
1129 | 1129 | pass |
|
1130 | 1130 | os.rename(tempname, wdest) |
|
1131 | 1131 | |
|
1132 | 1132 | def dirs_of(self, files): |
|
1133 | 1133 | dirs = set() |
|
1134 | 1134 | for f in files: |
|
1135 | 1135 | if os.path.isdir(self.wjoin(f)): |
|
1136 | 1136 | dirs.add(f) |
|
1137 | 1137 | for i in strutil.rfindall(f, '/'): |
|
1138 | 1138 | dirs.add(f[:i]) |
|
1139 | 1139 | return dirs |
|
1140 | 1140 | |
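# Editor's sketch, not part of the changeset: dirs_of() collects every
# ancestor directory of the given paths (strutil.rfindall yields the
# offset of each '/'). A pure-string equivalent on a hypothetical list:
def ancestors(files):
    dirs = set()
    for f in files:
        while '/' in f:
            f = f.rsplit('/', 1)[0]
            dirs.add(f)
    return dirs

assert ancestors(['a/b/c.txt']) == set(['a', 'a/b'])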
|
1141 | 1141 | def add_dirs(self, files): |
|
1142 | 1142 | add_dirs = [d for d in sorted(self.dirs_of(files)) |
|
1143 | 1143 | if d not in self.manifest] |
|
1144 | 1144 | if add_dirs: |
|
1145 | 1145 | self.manifest.update(add_dirs) |
|
1146 | 1146 | self.xargs(add_dirs, 'add', non_recursive=True, quiet=True) |
|
1147 | 1147 | return add_dirs |
|
1148 | 1148 | |
|
1149 | 1149 | def add_files(self, files): |
|
1150 | 1150 | files = [f for f in files if f not in self.manifest] |
|
1151 | 1151 | if files: |
|
1152 | 1152 | self.manifest.update(files) |
|
1153 | 1153 | self.xargs(files, 'add', quiet=True) |
|
1154 | 1154 | return files |
|
1155 | 1155 | |
|
1156 | 1156 | def tidy_dirs(self, names): |
|
1157 | 1157 | deleted = [] |
|
1158 | 1158 | for d in sorted(self.dirs_of(names), reverse=True): |
|
1159 | 1159 | wd = self.wjoin(d) |
|
1160 | 1160 | if os.listdir(wd) == ['.svn']: # dir empty but for .svn |
|
1161 | 1161 | self.run0('delete', d) |
|
1162 | 1162 | self.manifest.remove(d) |
|
1163 | 1163 | deleted.append(d) |
|
1164 | 1164 | return deleted |
|
1165 | 1165 | |
|
1166 | 1166 | def addchild(self, parent, child): |
|
1167 | 1167 | self.childmap[parent] = child |
|
1168 | 1168 | |
|
1169 | 1169 | def revid(self, rev): |
|
1170 | 1170 | return u"svn:%s@%s" % (self.uuid, rev) |
|
1171 | 1171 | |
|
1172 | 1172 | def putcommit(self, files, copies, parents, commit, source, revmap): |
|
1173 | 1173 | for parent in parents: |
|
1174 | 1174 | try: |
|
1175 | 1175 | return self.revid(self.childmap[parent]) |
|
1176 | 1176 | except KeyError: |
|
1177 | 1177 | pass |
|
1178 | 1178 | |
|
1179 | 1179 | # Apply changes to working copy |
|
1180 | 1180 | for f, v in files: |
|
1181 | 1181 | try: |
|
1182 | 1182 | data, mode = source.getfile(f, v) |
|
1183 | 1183 | except IOError: |
|
1184 | 1184 | self.delete.append(f) |
|
1185 | 1185 | else: |
|
1186 | 1186 | self.putfile(f, mode, data) |
|
1187 | 1187 | if f in copies: |
|
1188 | 1188 | self.copies.append([copies[f], f]) |
|
1189 | 1189 | files = [f[0] for f in files] |
|
1190 | 1190 | |
|
1191 | 1191 | entries = set(self.delete) |
|
1192 | 1192 | files = frozenset(files) |
|
1193 | 1193 | entries.update(self.add_dirs(files.difference(entries))) |
|
1194 | 1194 | if self.copies: |
|
1195 | 1195 | for s, d in self.copies: |
|
1196 | 1196 | self._copyfile(s, d) |
|
1197 | 1197 | self.copies = [] |
|
1198 | 1198 | if self.delete: |
|
1199 | 1199 | self.xargs(self.delete, 'delete') |
|
1200 | 1200 | for f in self.delete: |
|
1201 | 1201 | self.manifest.remove(f) |
|
1202 | 1202 | self.delete = [] |
|
1203 | 1203 | entries.update(self.add_files(files.difference(entries))) |
|
1204 | 1204 | entries.update(self.tidy_dirs(entries)) |
|
1205 | 1205 | if self.delexec: |
|
1206 | 1206 | self.xargs(self.delexec, 'propdel', 'svn:executable') |
|
1207 | 1207 | self.delexec = [] |
|
1208 | 1208 | if self.setexec: |
|
1209 | 1209 | self.xargs(self.setexec, 'propset', 'svn:executable', '*') |
|
1210 | 1210 | self.setexec = [] |
|
1211 | 1211 | |
|
1212 | 1212 | fd, messagefile = tempfile.mkstemp(prefix='hg-convert-') |
|
1213 | 1213 | fp = os.fdopen(fd, 'w') |
|
1214 | 1214 | fp.write(commit.desc) |
|
1215 | 1215 | fp.close() |
|
1216 | 1216 | try: |
|
1217 | 1217 | output = self.run0('commit', |
|
1218 | 1218 | username=util.shortuser(commit.author), |
|
1219 | 1219 | file=messagefile, |
|
1220 | 1220 | encoding='utf-8') |
|
1221 | 1221 | try: |
|
1222 | 1222 | rev = self.commit_re.search(output).group(1) |
|
1223 | 1223 | except AttributeError: |
|
1224 | 1224 | if not files: |
|
1225 | 1225 | return parents[0] |
|
1226 | 1226 | self.ui.warn(_('unexpected svn output:\n')) |
|
1227 | 1227 | self.ui.warn(output) |
|
1228 | 1228 | raise util.Abort(_('unable to cope with svn output')) |
|
1229 | 1229 | if commit.rev: |
|
1230 | 1230 | self.run('propset', 'hg:convert-rev', commit.rev, |
|
1231 | 1231 | revprop=True, revision=rev) |
|
1232 | 1232 | if commit.branch and commit.branch != 'default': |
|
1233 | 1233 | self.run('propset', 'hg:convert-branch', commit.branch, |
|
1234 | 1234 | revprop=True, revision=rev) |
|
1235 | 1235 | for parent in parents: |
|
1236 | 1236 | self.addchild(parent, rev) |
|
1237 | 1237 | return self.revid(rev) |
|
1238 | 1238 | finally: |
|
1239 | 1239 | os.unlink(messagefile) |
|
1240 | 1240 | |
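
Note that putcommit relies on self.commit_re (defined earlier in this file, outside the hunk shown) to pull the new revision number out of svn's commit output. A hedged illustration of that parsing step, with an assumed pattern matching svn's usual "Committed revision N." line:

    import re

    # assumed equivalent of the sink's commit_re; the real pattern lives
    # earlier in the source file
    commit_re = re.compile(r'Committed revision (\d+).')

    output = 'Sending        a.txt\nCommitted revision 42.\n'
    m = commit_re.search(output)
    if m:
        rev = m.group(1)  # '42', later recorded via addchild() and revid()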
|
1241 | 1241 | def puttags(self, tags): |
|
1242 | 1242 | self.ui.warn(_('writing Subversion tags is not yet implemented\n')) |
|
1243 | 1243 | return None, None |
|
1244 | 1244 | |
|
1245 | 1245 | def hascommit(self, rev): |
|
1246 | 1246 | # This is not correct as one can convert to an existing subversion |
|
1247 | 1247 | # repository and childmap would not list all revisions. Too bad. |
|
1248 | 1248 | if rev in self.childmap: |
|
1249 | 1249 | return True |
|
1250 | 1250 | raise util.Abort(_('splice map revision %s not found in subversion ' |
|
1251 | 1251 | 'child map (revision lookups are not implemented)') |
|
1252 | 1252 | % rev) |
@@ -1,517 +1,517 | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''High-level command function for lfconvert, plus the cmdtable.''' |
|
10 | 10 | |
|
11 | 11 | import os |
|
12 | 12 | import shutil |
|
13 | 13 | |
|
14 | 14 | from mercurial import util, match as match_, hg, node, context, error, cmdutil |
|
15 | 15 | from mercurial.i18n import _ |
|
16 | 16 | |
|
17 | 17 | import lfutil |
|
18 | 18 | import basestore |
|
19 | 19 | |
|
20 | 20 | # -- Commands ---------------------------------------------------------- |
|
21 | 21 | |
|
22 | 22 | def lfconvert(ui, src, dest, *pats, **opts): |
|
23 | 23 | '''convert a normal repository to a largefiles repository |
|
24 | 24 | |
|
25 | 25 | Convert repository SOURCE to a new repository DEST, identical to |
|
26 | 26 | SOURCE except that certain files will be converted as largefiles: |
|
27 | 27 | specifically, any file that matches any PATTERN *or* whose size is |
|
28 | 28 | above the minimum size threshold is converted as a largefile. The |
|
29 | 29 | size used to determine whether or not to track a file as a |
|
30 | 30 | largefile is the size of the first version of the file. The |
|
31 | 31 | minimum size can be specified either with --size or in |
|
32 | 32 | configuration as ``largefiles.size``. |
|
33 | 33 | |
|
34 | 34 | After running this command you will need to make sure that |
|
35 | 35 | largefiles is enabled anywhere you intend to push the new |
|
36 | 36 | repository. |
|
37 | 37 | |
|
38 | 38 | Use --to-normal to convert largefiles back to normal files; after |
|
39 | 39 | this, the DEST repository can be used without largefiles at all.''' |
|
40 | 40 | |
|
41 | 41 | if opts['to_normal']: |
|
42 | 42 | tolfile = False |
|
43 | 43 | else: |
|
44 | 44 | tolfile = True |
|
45 | 45 | size = lfutil.getminsize(ui, True, opts.get('size'), default=None) |
|
46 | 46 | |
|
47 | 47 | if not hg.islocal(src): |
|
48 | 48 | raise util.Abort(_('%s is not a local Mercurial repo') % src) |
|
49 | 49 | if not hg.islocal(dest): |
|
50 | 50 | raise util.Abort(_('%s is not a local Mercurial repo') % dest) |
|
51 | 51 | |
|
52 | 52 | rsrc = hg.repository(ui, src) |
|
53 | 53 | ui.status(_('initializing destination %s\n') % dest) |
|
54 | 54 | rdst = hg.repository(ui, dest, create=True) |
|
55 | 55 | |
|
56 | 56 | success = False |
|
57 | 57 | try: |
|
58 | 58 | # Lock the destination to prevent modification while we convert into it. |
|
59 | 59 | # Don't need to lock src because we are just reading from its history |
|
60 | 60 | # which can't change. |
|
61 | 61 | dstlock = rdst.lock() |
|
62 | 62 | |
|
63 | 63 | # Get a list of all changesets in the source. The easy way to do this |
|
64 | 64 | # is simply to walk the changelog, using changelog.nodesbetween(). |
|
65 | 65 | # Take a look at mercurial/revlog.py:639 for more details. |
|
66 | 66 | # Use a generator instead of a list to decrease memory usage |
|
67 | 67 | ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None, |
|
68 | 68 | rsrc.heads())[0]) |
|
69 | 69 | revmap = {node.nullid: node.nullid} |
|
70 | 70 | if tolfile: |
|
71 | 71 | lfiles = set() |
|
72 | 72 | normalfiles = set() |
|
73 | 73 | if not pats: |
|
74 | 74 | pats = ui.configlist(lfutil.longname, 'patterns', default=[]) |
|
75 | 75 | if pats: |
|
76 | 76 | matcher = match_.match(rsrc.root, '', list(pats)) |
|
77 | 77 | else: |
|
78 | 78 | matcher = None |
|
79 | 79 | |
|
80 | 80 | lfiletohash = {} |
|
81 | 81 | for ctx in ctxs: |
|
82 | 82 | ui.progress(_('converting revisions'), ctx.rev(), |
|
83 | 83 | unit=_('revision'), total=rsrc['tip'].rev()) |
|
84 | 84 | _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, |
|
85 | 85 | lfiles, normalfiles, matcher, size, lfiletohash) |
|
86 | 86 | ui.progress(_('converting revisions'), None) |
|
87 | 87 | |
|
88 | 88 | if os.path.exists(rdst.wjoin(lfutil.shortname)): |
|
89 | 89 | shutil.rmtree(rdst.wjoin(lfutil.shortname)) |
|
90 | 90 | |
|
91 | 91 | for f in lfiletohash.keys(): |
|
92 | 92 | if os.path.isfile(rdst.wjoin(f)): |
|
93 | 93 | os.unlink(rdst.wjoin(f)) |
|
94 | 94 | try: |
|
95 | 95 | os.removedirs(os.path.dirname(rdst.wjoin(f))) |
|
96 | 96 | except OSError: |
|
97 | 97 | pass |
|
98 | 98 | |
|
99 | 99 | # If there were any files converted to largefiles, add largefiles |
|
100 | 100 | # to the destination repository's requirements. |
|
101 | 101 | if lfiles: |
|
102 | 102 | rdst.requirements.add('largefiles') |
|
103 | 103 | rdst._writerequirements() |
|
104 | 104 | else: |
|
105 | 105 | for ctx in ctxs: |
|
106 | 106 | ui.progress(_('converting revisions'), ctx.rev(), |
|
107 | 107 | unit=_('revision'), total=rsrc['tip'].rev()) |
|
108 | 108 | _addchangeset(ui, rsrc, rdst, ctx, revmap) |
|
109 | 109 | |
|
110 | 110 | ui.progress(_('converting revisions'), None) |
|
111 | 111 | success = True |
|
112 | 112 | finally: |
|
113 | 113 | if not success: |
|
114 | 114 | # we failed, remove the new directory |
|
115 | 115 | shutil.rmtree(rdst.root) |
|
116 | 116 | dstlock.release() |
|
117 | 117 | |
|
118 | 118 | def _addchangeset(ui, rsrc, rdst, ctx, revmap): |
|
119 | 119 | # Convert src parents to dst parents |
|
120 | 120 | parents = _convertparents(ctx, revmap) |
|
121 | 121 | |
|
122 | 122 | # Generate list of changed files |
|
123 | 123 | files = _getchangedfiles(ctx, parents) |
|
124 | 124 | |
|
125 | 125 | def getfilectx(repo, memctx, f): |
|
126 | 126 | if lfutil.standin(f) in files: |
|
127 | 127 | # if the file isn't in the manifest then it was removed |
|
128 | 128 | # or renamed, raise IOError to indicate this |
|
129 | 129 | try: |
|
130 | 130 | fctx = ctx.filectx(lfutil.standin(f)) |
|
131 | 131 | except error.LookupError: |
|
132 | 132 | raise IOError |
|
133 | 133 | renamed = fctx.renamed() |
|
134 | 134 | if renamed: |
|
135 | 135 | renamed = lfutil.splitstandin(renamed[0]) |
|
136 | 136 | |
|
137 | 137 | hash = fctx.data().strip() |
|
138 | 138 | path = lfutil.findfile(rsrc, hash) |
|
139 | 139 | ### TODO: What if the file is not cached? |
|
140 | 140 | data = '' |
|
141 | 141 | fd = None |
|
142 | 142 | try: |
|
143 | 143 | fd = open(path, 'rb') |
|
144 | 144 | data = fd.read() |
|
145 | 145 | finally: |
|
146 | 146 | if fd: |
|
147 | 147 | fd.close() |
|
148 | 148 | return context.memfilectx(f, data, 'l' in fctx.flags(), |
|
149 | 149 | 'x' in fctx.flags(), renamed) |
|
150 | 150 | else: |
|
151 | 151 | return _getnormalcontext(repo.ui, ctx, f, revmap) |
|
152 | 152 | |
|
153 | 153 | dstfiles = [] |
|
154 | 154 | for file in files: |
|
155 | 155 | if lfutil.isstandin(file): |
|
156 | 156 | dstfiles.append(lfutil.splitstandin(file)) |
|
157 | 157 | else: |
|
158 | 158 | dstfiles.append(file) |
|
159 | 159 | # Commit |
|
160 | 160 | _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap) |
|
161 | 161 | |
|
162 | 162 | def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles, |
|
163 | 163 | matcher, size, lfiletohash): |
|
164 | 164 | # Convert src parents to dst parents |
|
165 | 165 | parents = _convertparents(ctx, revmap) |
|
166 | 166 | |
|
167 | 167 | # Generate list of changed files |
|
168 | 168 | files = _getchangedfiles(ctx, parents) |
|
169 | 169 | |
|
170 | 170 | dstfiles = [] |
|
171 | 171 | for f in files: |
|
172 | 172 | if f not in lfiles and f not in normalfiles: |
|
173 | 173 | islfile = _islfile(f, ctx, matcher, size) |
|
174 | 174 | # If this file was renamed or copied then copy |
|
175 | 175 | # the lfileness of its predecessor |
|
176 | 176 | if f in ctx.manifest(): |
|
177 | 177 | fctx = ctx.filectx(f) |
|
178 | 178 | renamed = fctx.renamed() |
|
179 | 179 | renamedlfile = renamed and renamed[0] in lfiles |
|
180 | 180 | islfile |= renamedlfile |
|
181 | 181 | if 'l' in fctx.flags(): |
|
182 | 182 | if renamedlfile: |
|
183 | 183 | raise util.Abort( |
|
184 | 184 | _('renamed/copied largefile %s becomes symlink') |
|
185 | 185 | % f) |
|
186 | 186 | islfile = False |
|
187 | 187 | if islfile: |
|
188 | 188 | lfiles.add(f) |
|
189 | 189 | else: |
|
190 | 190 | normalfiles.add(f) |
|
191 | 191 | |
|
192 | 192 | if f in lfiles: |
|
193 | 193 | dstfiles.append(lfutil.standin(f)) |
|
194 | 194 | # largefile in manifest if it has not been removed/renamed |
|
195 | 195 | if f in ctx.manifest(): |
|
196 | 196 | fctx = ctx.filectx(f) |
|
197 | 197 | if 'l' in fctx.flags(): |
|
198 | 198 | renamed = fctx.renamed() |
|
199 | 199 | if renamed and renamed[0] in lfiles: |
|
200 | 200 | raise util.Abort(_('largefile %s becomes symlink') % f) |
|
201 | 201 | |
|
202 | 202 | # largefile was modified, update standins |
|
203 | 203 | fullpath = rdst.wjoin(f) |
|
204 | 204 | util.makedirs(os.path.dirname(fullpath)) |
|
205 | 205 | m = util.sha1('') |
|
206 | 206 | m.update(ctx[f].data()) |
|
207 | 207 | hash = m.hexdigest() |
|
208 | 208 | if f not in lfiletohash or lfiletohash[f] != hash: |
|
209 | 209 | fd = open(fullpath, 'wb') |

210 | 210 | try: |

211 | 211 | fd.write(ctx[f].data()) |

212 | 212 | finally: |

213 | 213 | fd.close() |

214 | 214 | |
|
215 | 215 | executable = 'x' in ctx[f].flags() |
|
216 | 216 | os.chmod(fullpath, lfutil.getmode(executable)) |
|
217 | 217 | lfutil.writestandin(rdst, lfutil.standin(f), hash, |
|
218 | 218 | executable) |
|
219 | 219 | lfiletohash[f] = hash |
|
220 | 220 | else: |
|
221 | 221 | # normal file |
|
222 | 222 | dstfiles.append(f) |
|
223 | 223 | |
|
224 | 224 | def getfilectx(repo, memctx, f): |
|
225 | 225 | if lfutil.isstandin(f): |
|
226 | 226 | # if the file isn't in the manifest then it was removed |
|
227 | 227 | # or renamed, raise IOError to indicate this |
|
228 | 228 | srcfname = lfutil.splitstandin(f) |
|
229 | 229 | try: |
|
230 | 230 | fctx = ctx.filectx(srcfname) |
|
231 | 231 | except error.LookupError: |
|
232 | 232 | raise IOError |
|
233 | 233 | renamed = fctx.renamed() |
|
234 | 234 | if renamed: |
|
235 | 235 | # standin is always a largefile because largefile-ness |
|
236 | 236 | # doesn't change after rename or copy |
|
237 | 237 | renamed = lfutil.standin(renamed[0]) |
|
238 | 238 | |
|
239 | 239 | return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in |
|
240 | 240 | fctx.flags(), 'x' in fctx.flags(), renamed) |
|
241 | 241 | else: |
|
242 | 242 | return _getnormalcontext(repo.ui, ctx, f, revmap) |
|
243 | 243 | |
|
244 | 244 | # Commit |
|
245 | 245 | _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap) |
|
246 | 246 | |
|
247 | 247 | def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap): |
|
248 | 248 | mctx = context.memctx(rdst, parents, ctx.description(), dstfiles, |
|
249 | 249 | getfilectx, ctx.user(), ctx.date(), ctx.extra()) |
|
250 | 250 | ret = rdst.commitctx(mctx) |
|
251 | 251 | rdst.setparents(ret) |
|
252 | 252 | revmap[ctx.node()] = rdst.changelog.tip() |
|
253 | 253 | |
|
254 | 254 | # Generate list of changed files |
|
255 | 255 | def _getchangedfiles(ctx, parents): |
|
256 | 256 | files = set(ctx.files()) |
|
257 | 257 | if node.nullid not in parents: |
|
258 | 258 | mc = ctx.manifest() |
|
259 | 259 | mp1 = ctx.parents()[0].manifest() |
|
260 | 260 | mp2 = ctx.parents()[1].manifest() |
|
261 | 261 | files |= (set(mp1) | set(mp2)) - set(mc) |
|
262 | 262 | for f in mc: |
|
263 | 263 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
264 | 264 | files.add(f) |
|
265 | 265 | return files |
|
266 | 266 | |
|
267 | 267 | # Convert src parents to dst parents |
|
268 | 268 | def _convertparents(ctx, revmap): |
|
269 | 269 | parents = [] |
|
270 | 270 | for p in ctx.parents(): |
|
271 | 271 | parents.append(revmap[p.node()]) |
|
272 | 272 | while len(parents) < 2: |
|
273 | 273 | parents.append(node.nullid) |
|
274 | 274 | return parents |
|
275 | 275 | |
|
276 | 276 | # Get memfilectx for a normal file |
|
277 | 277 | def _getnormalcontext(ui, ctx, f, revmap): |
|
278 | 278 | try: |
|
279 | 279 | fctx = ctx.filectx(f) |
|
280 | 280 | except error.LookupError: |
|
281 | 281 | raise IOError |
|
282 | 282 | renamed = fctx.renamed() |
|
283 | 283 | if renamed: |
|
284 | 284 | renamed = renamed[0] |
|
285 | 285 | |
|
286 | 286 | data = fctx.data() |
|
287 | 287 | if f == '.hgtags': |
|
288 | 288 | data = _converttags(ui, revmap, data) |
|
289 | 289 | return context.memfilectx(f, data, 'l' in fctx.flags(), |
|
290 | 290 | 'x' in fctx.flags(), renamed) |
|
291 | 291 | |
|
292 | 292 | # Remap tag data using a revision map |
|
293 | 293 | def _converttags(ui, revmap, data): |
|
294 | 294 | newdata = [] |
|
295 | 295 | for line in data.splitlines(): |
|
296 | 296 | try: |
|
297 | 297 | id, name = line.split(' ', 1) |
|
298 | 298 | except ValueError: |
|
299 | 299 | ui.warn(_('skipping incorrectly formatted tag %s\n') |

300 | 300 | % line) |
|
301 | 301 | continue |
|
302 | 302 | try: |
|
303 | 303 | newid = node.bin(id) |
|
304 | 304 | except TypeError: |
|
305 | 305 | ui.warn(_('skipping incorrectly formatted id %s\n') |

306 | 306 | % id) |
|
307 | 307 | continue |
|
308 | 308 | try: |
|
309 | 309 | newdata.append('%s %s\n' % (node.hex(revmap[newid]), |
|
310 | 310 | name)) |
|
311 | 311 | except KeyError: |
|
312 | 312 | ui.warn(_('no mapping for id %s\n') % id) |
|
313 | 313 | continue |
|
314 | 314 | return ''.join(newdata) |
|
315 | 315 | |
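
To make the tag remapping concrete, a small illustrative run (the 40-hex ids below are fabricated, not from the source):

    from mercurial import node

    old = 'a' * 40                   # fake source changeset id
    new = 'b' * 40                   # fake converted changeset id
    revmap = {node.bin(old): node.bin(new)}
    data = '%s v1.0\n' % old
    # _converttags(ui, revmap, data) would return '%s v1.0\n' % new,
    # warning about and skipping any line it cannot parse or map.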
|
316 | 316 | def _islfile(file, ctx, matcher, size): |
|
317 | 317 | '''Return true if file should be considered a largefile, i.e. |
|
318 | 318 | matcher matches it or it is larger than size.''' |
|
319 | 319 | # never store special .hg* files as largefiles |
|
320 | 320 | if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs': |
|
321 | 321 | return False |
|
322 | 322 | if matcher and matcher(file): |
|
323 | 323 | return True |
|
324 | 324 | try: |
|
325 | 325 | return ctx.filectx(file).size() >= size * 1024 * 1024 |
|
326 | 326 | except error.LookupError: |
|
327 | 327 | return False |
|
328 | 328 | |
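
Restating the rule _islfile implements as a self-contained sketch (the helper below is hypothetical and not in the source): a file becomes a largefile when a pattern matches it or its first version reaches the size threshold, and the special .hg* files are always excluded.

    def would_be_largefile(path, nbytes, matcher=None, size_mb=10):
        # special .hg* files are never stored as largefiles
        if path in ('.hgtags', '.hgignore', '.hgsigs'):
            return False
        if matcher and matcher(path):
            return True
        # the threshold is given in megabytes, compared in bytes
        return nbytes >= size_mb * 1024 * 1024

    # would_be_largefile('media/blob.bin', 11 * 1024 * 1024) -> True
    # would_be_largefile('.hgtags', 50 * 1024 * 1024)        -> False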
|
329 | 329 | def uploadlfiles(ui, rsrc, rdst, files): |
|
330 | 330 | '''upload largefiles to the central store''' |
|
331 | 331 | |
|
332 | 332 | if not files: |
|
333 | 333 | return |
|
334 | 334 | |
|
335 | 335 | store = basestore._openstore(rsrc, rdst, put=True) |
|
336 | 336 | |
|
337 | 337 | at = 0 |
|
338 | 338 | files = filter(lambda h: not store.exists(h), files) |
|
339 | 339 | for hash in files: |
|
340 | 340 | ui.progress(_('uploading largefiles'), at, unit='largefile', |
|
341 | 341 | total=len(files)) |
|
342 | 342 | source = lfutil.findfile(rsrc, hash) |
|
343 | 343 | if not source: |
|
344 | 344 | raise util.Abort(_('largefile %s missing from store' |
|
345 | 345 | ' (needs to be uploaded)') % hash) |
|
346 | 346 | # XXX check for errors here |
|
347 | 347 | store.put(source, hash) |
|
348 | 348 | at += 1 |
|
349 | 349 | ui.progress(_('uploading largefiles'), None) |
|
350 | 350 | |
|
351 | 351 | def verifylfiles(ui, repo, all=False, contents=False): |
|
352 | 352 | '''Verify that every big file revision in the current changeset |
|
353 | 353 | exists in the central store. With --contents, also verify that |
|
354 | 354 | the contents of each big file revision are correct (SHA-1 hash |
|
355 | 355 | matches the revision ID). With --all, check every changeset in |
|
356 | 356 | this repository.''' |
|
357 | 357 | if all: |
|
358 | 358 | # Pass a list to the function rather than an iterator because we know a |
|
359 | 359 | # list will work. |
|
360 | 360 | revs = range(len(repo)) |
|
361 | 361 | else: |
|
362 | 362 | revs = ['.'] |
|
363 | 363 | |
|
364 | 364 | store = basestore._openstore(repo) |
|
365 | 365 | return store.verify(revs, contents=contents) |
|
366 | 366 | |
|
367 | 367 | def cachelfiles(ui, repo, node): |
|
368 | 368 | '''cachelfiles ensures that all largefiles needed by the specified revision |
|
369 | 369 | are present in the repository's largefile cache. |
|
370 | 370 | |
|
371 | 371 | returns a tuple (cached, missing). cached is the list of files downloaded |
|
372 | 372 | by this operation; missing is the list of files that were needed but could |
|
373 | 373 | not be found.''' |
|
374 | 374 | lfiles = lfutil.listlfiles(repo, node) |
|
375 | 375 | toget = [] |
|
376 | 376 | |
|
377 | 377 | for lfile in lfiles: |
|
378 | 378 | # If we are mid-merge, then we have to trust the standin that is in the |
|
379 | 379 | # working copy to have the correct hashvalue. This is because the |
|
380 | 380 | # original hg.merge() already updated the standin as part of the normal |
|
381 | 381 | # merge process -- we just have to update the largefile to match. |
|
382 | 382 | if (getattr(repo, "_ismerging", False) and |
|
383 | 383 | os.path.exists(repo.wjoin(lfutil.standin(lfile)))): |
|
384 | 384 | expectedhash = lfutil.readstandin(repo, lfile) |
|
385 | 385 | else: |
|
386 | 386 | expectedhash = repo[node][lfutil.standin(lfile)].data().strip() |
|
387 | 387 | |
|
388 | 388 | # if it exists and its hash matches, it might have been locally |
|
389 | 389 | # modified before updating and the user chose 'local'. in this case, |
|
390 | 390 | # it will not be in any store, so don't look for it. |
|
391 | 391 | if ((not os.path.exists(repo.wjoin(lfile)) or |
|
392 | 392 | expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and |
|
393 | 393 | not lfutil.findfile(repo, expectedhash)): |
|
394 | 394 | toget.append((lfile, expectedhash)) |
|
395 | 395 | |
|
396 | 396 | if toget: |
|
397 | 397 | store = basestore._openstore(repo) |
|
398 | 398 | ret = store.get(toget) |
|
399 | 399 | return ret |
|
400 | 400 | |
|
401 | 401 | return ([], []) |
|
402 | 402 | |
|
403 | 403 | def updatelfiles(ui, repo, filelist=None, printmessage=True): |
|
404 | 404 | wlock = repo.wlock() |
|
405 | 405 | try: |
|
406 | 406 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
407 | 407 | lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate) |
|
408 | 408 | |
|
409 | 409 | if filelist is not None: |
|
410 | 410 | lfiles = [f for f in lfiles if f in filelist] |
|
411 | 411 | |
|
412 | 412 | printed = False |
|
413 | 413 | if printmessage and lfiles: |
|
414 | 414 | ui.status(_('getting changed largefiles\n')) |
|
415 | 415 | printed = True |
|
416 | 416 | cachelfiles(ui, repo, '.') |
|
417 | 417 | |
|
418 | 418 | updated, removed = 0, 0 |
|
419 | 419 | for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles): |
|
420 | 420 | # increment the appropriate counter according to _updatelfile's |
|
421 | 421 | # return value |
|
422 | 422 | updated += i > 0 and i or 0 |
|
423 | 423 | removed -= i < 0 and i or 0 |
|
424 | 424 | if printmessage and (removed or updated) and not printed: |
|
425 | 425 | ui.status(_('getting changed largefiles\n')) |
|
426 | 426 | printed = True |
|
427 | 427 | |
|
428 | 428 | lfdirstate.write() |
|
429 | 429 | if printed and printmessage: |
|
430 | 430 | ui.status(_('%d largefiles updated, %d removed\n') % (updated, |
|
431 | 431 | removed)) |
|
432 | 432 | finally: |
|
433 | 433 | wlock.release() |
|
434 | 434 | |
|
435 | 435 | def _updatelfile(repo, lfdirstate, lfile): |
|
436 | 436 | '''updates a single largefile and copies the state of its standin from |
|
437 | 437 | the repository's dirstate to its state in the lfdirstate. |
|
438 | 438 | |
|
439 | 439 | returns 1 if the file was modified, -1 if the file was removed, 0 if the |
|
440 | 440 | file was unchanged, and None if the needed largefile was missing from the |
|
441 | 441 | cache.''' |
|
442 | 442 | ret = 0 |
|
443 | 443 | abslfile = repo.wjoin(lfile) |
|
444 | 444 | absstandin = repo.wjoin(lfutil.standin(lfile)) |
|
445 | 445 | if os.path.exists(absstandin): |
|
446 | 446 | if os.path.exists(absstandin+'.orig'): |
|
447 | 447 | shutil.copyfile(abslfile, abslfile+'.orig') |
|
448 | 448 | expecthash = lfutil.readstandin(repo, lfile) |
|
449 | 449 | if (expecthash != '' and |
|
450 | 450 | (not os.path.exists(abslfile) or |
|
451 | 451 | expecthash != lfutil.hashfile(abslfile))): |
|
452 | 452 | if not lfutil.copyfromcache(repo, expecthash, lfile): |
|
453 | 453 | # use normallookup() to allocate an entry in the largefiles |

454 | 454 | # dirstate; without it, lfilesrepo.status() is misled into |

455 | 455 | # reporting such cache-missing files as REMOVED. |
|
456 | 456 | lfdirstate.normallookup(lfile) |
|
457 | 457 | return None # don't try to set the mode |
|
458 | 458 | ret = 1 |
|
459 | 459 | mode = os.stat(absstandin).st_mode |
|
460 | 460 | if mode != os.stat(abslfile).st_mode: |
|
461 | 461 | os.chmod(abslfile, mode) |
|
462 | 462 | ret = 1 |
|
463 | 463 | else: |
|
464 | 464 | # Remove lfiles for which the standin is deleted, unless the |
|
465 | 465 | # lfile is added to the repository again. This happens when a |
|
466 | 466 | # largefile is converted back to a normal file: the standin |
|
467 | 467 | # disappears, but a new (normal) file appears as the lfile. |
|
468 | 468 | if os.path.exists(abslfile) and lfile not in repo[None]: |
|
469 | 469 | util.unlinkpath(abslfile) |
|
470 | 470 | ret = -1 |
|
471 | 471 | state = repo.dirstate[lfutil.standin(lfile)] |
|
472 | 472 | if state == 'n': |
|
473 | 473 | # When rebasing, we need to synchronize the standin and the largefile, |
|
474 | 474 | # because otherwise the largefile will get reverted. But for commit's |
|
475 | 475 | # sake, we have to mark the file as unclean. |
|
476 | 476 | if getattr(repo, "_isrebasing", False): |
|
477 | 477 | lfdirstate.normallookup(lfile) |
|
478 | 478 | else: |
|
479 | 479 | lfdirstate.normal(lfile) |
|
480 | 480 | elif state == 'r': |
|
481 | 481 | lfdirstate.remove(lfile) |
|
482 | 482 | elif state == 'a': |
|
483 | 483 | lfdirstate.add(lfile) |
|
484 | 484 | elif state == '?': |
|
485 | 485 | lfdirstate.drop(lfile) |
|
486 | 486 | return ret |
|
487 | 487 | |
|
488 | 488 | def catlfile(repo, lfile, rev, filename): |
|
489 | 489 | hash = lfutil.readstandin(repo, lfile, rev) |
|
490 | 490 | if not lfutil.inusercache(repo.ui, hash): |
|
491 | 491 | store = basestore._openstore(repo) |
|
492 | 492 | success, missing = store.get([(lfile, hash)]) |
|
493 | 493 | if len(success) != 1: |
|
494 | 494 | raise util.Abort( |
|
495 | 495 | _('largefile %s is not in cache and could not be downloaded') |
|
496 | 496 | % lfile) |
|
497 | 497 | path = lfutil.usercachepath(repo.ui, hash) |
|
498 | 498 | fpout = cmdutil.makefileobj(repo, filename) |
|
499 | 499 | fpin = open(path, "rb") |
|
500 | 500 | fpout.write(fpin.read()) |
|
501 | 501 | fpout.close() |
|
502 | 502 | fpin.close() |
|
503 | 503 | return 0 |
|
504 | 504 | |
|
505 | 505 | # -- hg commands declarations ------------------------------------------------ |
|
506 | 506 | |
|
507 | 507 | cmdtable = { |
|
508 | 508 | 'lfconvert': (lfconvert, |
|
509 | 509 | [('s', 'size', '', |
|
510 | 510 | _('minimum size (MB) for files to be converted ' |
|
511 | 511 | 'as largefiles'), |
|
512 | 512 | 'SIZE'), |
|
513 | 513 | ('', 'to-normal', False, |
|
514 | 514 | _('convert from a largefiles repo to a normal repo')), |
|
515 | 515 | ], |
|
516 | 516 | _('hg lfconvert SOURCE DEST [FILE ...]')), |
|
517 | 517 | } |
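
A closing note on the standin convention this file leans on: a standin is a small versioned file holding the SHA-1 hex digest of the largefile's contents (see the util.sha1 usage around lines 205-207 and the lfiletohash[srcfname] + '\n' memfilectx above). A minimal sketch of the hashing side, assuming plain hashlib is an acceptable stand-in for mercurial's util.sha1:

    import hashlib

    def standin_content(largefile_data):
        # the standin stores the 40-character SHA-1 hex digest of the
        # largefile; a trailing newline is added when it is written out
        return hashlib.sha1(largefile_data).hexdigest() + '\n'
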
@@ -1,3533 +1,3533 | |||
|
1 | 1 | # mq.py - patch queues for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''manage a stack of patches |
|
9 | 9 | |
|
10 | 10 | This extension lets you work with a stack of patches in a Mercurial |
|
11 | 11 | repository. It manages two stacks of patches - all known patches, and |
|
12 | 12 | applied patches (subset of known patches). |
|
13 | 13 | |
|
14 | 14 | Known patches are represented as patch files in the .hg/patches |
|
15 | 15 | directory. Applied patches are both patch files and changesets. |
|
16 | 16 | |
|
17 | 17 | Common tasks (use :hg:`help command` for more details):: |
|
18 | 18 | |
|
19 | 19 | create new patch qnew |
|
20 | 20 | import existing patch qimport |
|
21 | 21 | |
|
22 | 22 | print patch series qseries |
|
23 | 23 | print applied patches qapplied |
|
24 | 24 | |
|
25 | 25 | add known patch to applied stack qpush |
|
26 | 26 | remove patch from applied stack qpop |
|
27 | 27 | refresh contents of top applied patch qrefresh |
|
28 | 28 | |
|
29 | 29 | By default, mq will automatically use git patches when required to |
|
30 | 30 | avoid losing file mode changes, copy records, binary files, or empty |

31 | 31 | file creations or deletions. This behaviour can be configured with:: |
|
32 | 32 | |
|
33 | 33 | [mq] |
|
34 | 34 | git = auto/keep/yes/no |
|
35 | 35 | |
|
36 | 36 | If set to 'keep', mq will obey the [diff] section configuration while |
|
37 | 37 | preserving existing git patches upon qrefresh. If set to 'yes' or |
|
38 | 38 | 'no', mq will override the [diff] section and always generate git or |
|
39 | 39 | regular patches, possibly losing data in the second case. |
|
40 | 40 | |
|
41 | 41 | It may be desirable for mq changesets to be kept in the secret phase (see |
|
42 | 42 | :hg:`help phases`), which can be enabled with the following setting:: |
|
43 | 43 | |
|
44 | 44 | [mq] |
|
45 | 45 | secret = True |
|
46 | 46 | |
|
47 | 47 | You will by default be managing a patch queue named "patches". You can |
|
48 | 48 | create other, independent patch queues with the :hg:`qqueue` command. |
|
49 | 49 | |
|
50 | 50 | If the working directory contains uncommitted files, qpush, qpop and |
|
51 | 51 | qgoto abort immediately. If -f/--force is used, the changes are |
|
52 | 52 | discarded. Setting: |
|
53 | 53 | |
|
54 | 54 | [mq] |
|
55 | 55 | check = True |
|
56 | 56 | |
|
57 | 57 | makes them behave as if -c/--check were passed, and non-conflicting |
|
58 | 58 | local changes will be tolerated and preserved. If incompatible options |
|
59 | 59 | such as -f/--force or --exact are passed, this setting is ignored. |
|
60 | 60 | ''' |
|
61 | 61 | |
|
62 | 62 | from mercurial.i18n import _ |
|
63 | 63 | from mercurial.node import bin, hex, short, nullid, nullrev |
|
64 | 64 | from mercurial.lock import release |
|
65 | 65 | from mercurial import commands, cmdutil, hg, scmutil, util, revset |
|
66 | 66 | from mercurial import repair, extensions, url, error, phases |
|
67 | 67 | from mercurial import patch as patchmod |
|
68 | 68 | import os, re, errno, shutil |
|
69 | 69 | |
|
70 | 70 | commands.norepo += " qclone" |
|
71 | 71 | |
|
72 | 72 | seriesopts = [('s', 'summary', None, _('print first line of patch header'))] |
|
73 | 73 | |
|
74 | 74 | cmdtable = {} |
|
75 | 75 | command = cmdutil.command(cmdtable) |
|
76 | 76 | |
|
77 | 77 | # Patch names look like unix-file names. |
|
78 | 78 | # They must be joinable with queue directory and result in the patch path. |
|
79 | 79 | normname = util.normpath |
|
80 | 80 | |
|
81 | 81 | class statusentry(object): |
|
82 | 82 | def __init__(self, node, name): |
|
83 | 83 | self.node, self.name = node, name |
|
84 | 84 | def __repr__(self): |
|
85 | 85 | return hex(self.node) + ':' + self.name |
|
86 | 86 | |
|
87 | 87 | class patchheader(object): |
|
88 | 88 | def __init__(self, pf, plainmode=False): |
|
89 | 89 | def eatdiff(lines): |
|
90 | 90 | while lines: |
|
91 | 91 | l = lines[-1] |
|
92 | 92 | if (l.startswith("diff -") or |
|
93 | 93 | l.startswith("Index:") or |
|
94 | 94 | l.startswith("===========")): |
|
95 | 95 | del lines[-1] |
|
96 | 96 | else: |
|
97 | 97 | break |
|
98 | 98 | def eatempty(lines): |
|
99 | 99 | while lines: |
|
100 | 100 | if not lines[-1].strip(): |
|
101 | 101 | del lines[-1] |
|
102 | 102 | else: |
|
103 | 103 | break |
|
104 | 104 | |
|
105 | 105 | message = [] |
|
106 | 106 | comments = [] |
|
107 | 107 | user = None |
|
108 | 108 | date = None |
|
109 | 109 | parent = None |
|
110 | 110 | format = None |
|
111 | 111 | subject = None |
|
112 | 112 | branch = None |
|
113 | 113 | nodeid = None |
|
114 | 114 | diffstart = 0 |
|
115 | 115 | |
|
116 | 116 | for line in file(pf): |
|
117 | 117 | line = line.rstrip() |
|
118 | 118 | if (line.startswith('diff --git') |
|
119 | 119 | or (diffstart and line.startswith('+++ '))): |
|
120 | 120 | diffstart = 2 |
|
121 | 121 | break |
|
122 | 122 | diffstart = 0 # reset |
|
123 | 123 | if line.startswith("--- "): |
|
124 | 124 | diffstart = 1 |
|
125 | 125 | continue |
|
126 | 126 | elif format == "hgpatch": |
|
127 | 127 | # parse values when importing the result of an hg export |
|
128 | 128 | if line.startswith("# User "): |
|
129 | 129 | user = line[7:] |
|
130 | 130 | elif line.startswith("# Date "): |
|
131 | 131 | date = line[7:] |
|
132 | 132 | elif line.startswith("# Parent "): |
|
133 | 133 | parent = line[9:].lstrip() |
|
134 | 134 | elif line.startswith("# Branch "): |
|
135 | 135 | branch = line[9:] |
|
136 | 136 | elif line.startswith("# Node ID "): |
|
137 | 137 | nodeid = line[10:] |
|
138 | 138 | elif not line.startswith("# ") and line: |
|
139 | 139 | message.append(line) |
|
140 | 140 | format = None |
|
141 | 141 | elif line == '# HG changeset patch': |
|
142 | 142 | message = [] |
|
143 | 143 | format = "hgpatch" |
|
144 | 144 | elif (format != "tagdone" and (line.startswith("Subject: ") or |
|
145 | 145 | line.startswith("subject: "))): |
|
146 | 146 | subject = line[9:] |
|
147 | 147 | format = "tag" |
|
148 | 148 | elif (format != "tagdone" and (line.startswith("From: ") or |
|
149 | 149 | line.startswith("from: "))): |
|
150 | 150 | user = line[6:] |
|
151 | 151 | format = "tag" |
|
152 | 152 | elif (format != "tagdone" and (line.startswith("Date: ") or |
|
153 | 153 | line.startswith("date: "))): |
|
154 | 154 | date = line[6:] |
|
155 | 155 | format = "tag" |
|
156 | 156 | elif format == "tag" and line == "": |
|
157 | 157 | # when looking for tags (subject: from: etc) they |
|
158 | 158 | # end once you find a blank line in the source |
|
159 | 159 | format = "tagdone" |
|
160 | 160 | elif message or line: |
|
161 | 161 | message.append(line) |
|
162 | 162 | comments.append(line) |
|
163 | 163 | |
|
164 | 164 | eatdiff(message) |
|
165 | 165 | eatdiff(comments) |
|
166 | 166 | # Remember the exact starting line of the patch diffs before consuming |
|
167 | 167 | # empty lines, for external use by TortoiseHg and others |
|
168 | 168 | self.diffstartline = len(comments) |
|
169 | 169 | eatempty(message) |
|
170 | 170 | eatempty(comments) |
|
171 | 171 | |
|
172 | 172 | # make sure message isn't empty |
|
173 | 173 | if format and format.startswith("tag") and subject: |
|
174 | 174 | message.insert(0, "") |
|
175 | 175 | message.insert(0, subject) |
|
176 | 176 | |
|
177 | 177 | self.message = message |
|
178 | 178 | self.comments = comments |
|
179 | 179 | self.user = user |
|
180 | 180 | self.date = date |
|
181 | 181 | self.parent = parent |
|
182 | 182 | # nodeid and branch are for external use by TortoiseHg and others |
|
183 | 183 | self.nodeid = nodeid |
|
184 | 184 | self.branch = branch |
|
185 | 185 | self.haspatch = diffstart > 1 |
|
186 | 186 | self.plainmode = plainmode |
|
187 | 187 | |
|
188 | 188 | def setuser(self, user): |
|
189 | 189 | if not self.updateheader(['From: ', '# User '], user): |
|
190 | 190 | try: |
|
191 | 191 | patchheaderat = self.comments.index('# HG changeset patch') |
|
192 | 192 | self.comments.insert(patchheaderat + 1, '# User ' + user) |
|
193 | 193 | except ValueError: |
|
194 | 194 | if self.plainmode or self._hasheader(['Date: ']): |
|
195 | 195 | self.comments = ['From: ' + user] + self.comments |
|
196 | 196 | else: |
|
197 | 197 | tmp = ['# HG changeset patch', '# User ' + user, ''] |
|
198 | 198 | self.comments = tmp + self.comments |
|
199 | 199 | self.user = user |
|
200 | 200 | |
|
201 | 201 | def setdate(self, date): |
|
202 | 202 | if not self.updateheader(['Date: ', '# Date '], date): |
|
203 | 203 | try: |
|
204 | 204 | patchheaderat = self.comments.index('# HG changeset patch') |
|
205 | 205 | self.comments.insert(patchheaderat + 1, '# Date ' + date) |
|
206 | 206 | except ValueError: |
|
207 | 207 | if self.plainmode or self._hasheader(['From: ']): |
|
208 | 208 | self.comments = ['Date: ' + date] + self.comments |
|
209 | 209 | else: |
|
210 | 210 | tmp = ['# HG changeset patch', '# Date ' + date, ''] |
|
211 | 211 | self.comments = tmp + self.comments |
|
212 | 212 | self.date = date |
|
213 | 213 | |
|
214 | 214 | def setparent(self, parent): |
|
215 | 215 | if not self.updateheader(['# Parent '], parent): |
|
216 | 216 | try: |
|
217 | 217 | patchheaderat = self.comments.index('# HG changeset patch') |
|
218 | 218 | self.comments.insert(patchheaderat + 1, '# Parent ' + parent) |
|
219 | 219 | except ValueError: |
|
220 | 220 | pass |
|
221 | 221 | self.parent = parent |
|
222 | 222 | |
|
223 | 223 | def setmessage(self, message): |
|
224 | 224 | if self.comments: |
|
225 | 225 | self._delmsg() |
|
226 | 226 | self.message = [message] |
|
227 | 227 | self.comments += self.message |
|
228 | 228 | |
|
229 | 229 | def updateheader(self, prefixes, new): |
|
230 | 230 | '''Update all references to a field in the patch header. |
|
231 | 231 | Return whether the field is present.''' |
|
232 | 232 | res = False |
|
233 | 233 | for prefix in prefixes: |
|
234 | 234 | for i in xrange(len(self.comments)): |
|
235 | 235 | if self.comments[i].startswith(prefix): |
|
236 | 236 | self.comments[i] = prefix + new |
|
237 | 237 | res = True |
|
238 | 238 | break |
|
239 | 239 | return res |
|
240 | 240 | |
|
241 | 241 | def _hasheader(self, prefixes): |
|
242 | 242 | '''Check if a header starts with any of the given prefixes.''' |
|
243 | 243 | for prefix in prefixes: |
|
244 | 244 | for comment in self.comments: |
|
245 | 245 | if comment.startswith(prefix): |
|
246 | 246 | return True |
|
247 | 247 | return False |
|
248 | 248 | |
|
249 | 249 | def __str__(self): |
|
250 | 250 | if not self.comments: |
|
251 | 251 | return '' |
|
252 | 252 | return '\n'.join(self.comments) + '\n\n' |
|
253 | 253 | |
|
254 | 254 | def _delmsg(self): |
|
255 | 255 | '''Remove existing message, keeping the rest of the comments fields. |
|
256 | 256 | If comments contains 'subject: ', message will prepend |
|
257 | 257 | the field and a blank line.''' |
|
258 | 258 | if self.message: |
|
259 | 259 | subj = 'subject: ' + self.message[0].lower() |
|
260 | 260 | for i in xrange(len(self.comments)): |
|
261 | 261 | if subj == self.comments[i].lower(): |
|
262 | 262 | del self.comments[i] |
|
263 | 263 | self.message = self.message[2:] |
|
264 | 264 | break |
|
265 | 265 | ci = 0 |
|
266 | 266 | for mi in self.message: |
|
267 | 267 | while mi != self.comments[ci]: |
|
268 | 268 | ci += 1 |
|
269 | 269 | del self.comments[ci] |
|
270 | 270 | |
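
To make the header-parsing loop above concrete, here is a hedged sketch of what patchheader extracts from a typical hg-exported patch (the patch text is fabricated, not from the source):

    # Given a patch file beginning with:
    #
    #     # HG changeset patch
    #     # User alice
    #     # Date 1300000000 0
    #     # Parent 0123456789abcdef0123456789abcdef01234567
    #     fix the frobnicator
    #
    #     diff --git a/frob.py b/frob.py
    #     ...
    #
    # patchheader comes out with roughly:
    #     user     = 'alice'
    #     date     = '1300000000 0'
    #     parent   = '0123456789abcdef0123456789abcdef01234567'
    #     message  = ['fix the frobnicator']
    #     haspatch = True   (diffstart reached 2 at the 'diff --git' line)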
|
271 | 271 | def newcommit(repo, phase, *args, **kwargs): |
|
272 | 272 | """helper dedicated to ensure a commit respect mq.secret setting |
|
273 | 273 | |
|
274 | 274 | It should be used instead of repo.commit inside the mq source for |

275 | 275 | operations creating new changesets. |
|
276 | 276 | """ |
|
277 | 277 | if phase is None: |
|
278 | 278 | if repo.ui.configbool('mq', 'secret', False): |
|
279 | 279 | phase = phases.secret |
|
280 | 280 | if phase is not None: |
|
281 | 281 | backup = repo.ui.backupconfig('phases', 'new-commit') |
|
282 | 282 | # Marking the repository as committing an mq patch can be used |
|
283 | 283 | # to optimize operations like _branchtags(). |
|
284 | 284 | repo._committingpatch = True |
|
285 | 285 | try: |
|
286 | 286 | if phase is not None: |
|
287 | 287 | repo.ui.setconfig('phases', 'new-commit', phase) |
|
288 | 288 | return repo.commit(*args, **kwargs) |
|
289 | 289 | finally: |
|
290 | 290 | repo._committingpatch = False |
|
291 | 291 | if phase is not None: |
|
292 | 292 | repo.ui.restoreconfig(backup) |
|
293 | 293 | |
|
294 | 294 | class AbortNoCleanup(error.Abort): |
|
295 | 295 | pass |
|
296 | 296 | |
|
297 | 297 | class queue(object): |
|
298 | 298 | def __init__(self, ui, path, patchdir=None): |
|
299 | 299 | self.basepath = path |
|
300 | 300 | try: |
|
301 | 301 | fh = open(os.path.join(path, 'patches.queue')) |
|
302 | 302 | cur = fh.read().rstrip() |
|
303 | 303 | fh.close() |
|
304 | 304 | if not cur: |
|
305 | 305 | curpath = os.path.join(path, 'patches') |
|
306 | 306 | else: |
|
307 | 307 | curpath = os.path.join(path, 'patches-' + cur) |
|
308 | 308 | except IOError: |
|
309 | 309 | curpath = os.path.join(path, 'patches') |
|
310 | 310 | self.path = patchdir or curpath |
|
311 | 311 | self.opener = scmutil.opener(self.path) |
|
312 | 312 | self.ui = ui |
|
313 | 313 | self.applieddirty = False |
|
314 | 314 | self.seriesdirty = False |
|
315 | 315 | self.added = [] |
|
316 | 316 | self.seriespath = "series" |
|
317 | 317 | self.statuspath = "status" |
|
318 | 318 | self.guardspath = "guards" |
|
319 | 319 | self.activeguards = None |
|
320 | 320 | self.guardsdirty = False |
|
321 | 321 | # Handle mq.git as a bool with extended values |
|
322 | 322 | try: |
|
323 | 323 | gitmode = ui.configbool('mq', 'git', None) |
|
324 | 324 | if gitmode is None: |
|
325 | 325 | raise error.ConfigError |
|
326 | 326 | self.gitmode = gitmode and 'yes' or 'no' |
|
327 | 327 | except error.ConfigError: |
|
328 | 328 | self.gitmode = ui.config('mq', 'git', 'auto').lower() |
|
329 | 329 | self.plainmode = ui.configbool('mq', 'plain', False) |
|
330 | 330 | |
|
331 | 331 | @util.propertycache |
|
332 | 332 | def applied(self): |
|
333 | 333 | def parselines(lines): |
|
334 | 334 | for l in lines: |
|
335 | 335 | entry = l.split(':', 1) |
|
336 | 336 | if len(entry) > 1: |
|
337 | 337 | n, name = entry |
|
338 | 338 | yield statusentry(bin(n), name) |
|
339 | 339 | elif l.strip(): |
|
340 | 340 | self.ui.warn(_('malformatted mq status line: %s\n') % entry) |
|
341 | 341 | # else we ignore empty lines |
|
342 | 342 | try: |
|
343 | 343 | lines = self.opener.read(self.statuspath).splitlines() |
|
344 | 344 | return list(parselines(lines)) |
|
345 | 345 | except IOError, e: |
|
346 | 346 | if e.errno == errno.ENOENT: |
|
347 | 347 | return [] |
|
348 | 348 | raise |
|
349 | 349 | |
|
350 | 350 | @util.propertycache |
|
351 | 351 | def fullseries(self): |
|
352 | 352 | try: |
|
353 | 353 | return self.opener.read(self.seriespath).splitlines() |
|
354 | 354 | except IOError, e: |
|
355 | 355 | if e.errno == errno.ENOENT: |
|
356 | 356 | return [] |
|
357 | 357 | raise |
|
358 | 358 | |
|
359 | 359 | @util.propertycache |
|
360 | 360 | def series(self): |
|
361 | 361 | self.parseseries() |
|
362 | 362 | return self.series |
|
363 | 363 | |
|
364 | 364 | @util.propertycache |
|
365 | 365 | def seriesguards(self): |
|
366 | 366 | self.parseseries() |
|
367 | 367 | return self.seriesguards |
|
368 | 368 | |
|
369 | 369 | def invalidate(self): |
|
370 | 370 | for a in 'applied fullseries series seriesguards'.split(): |
|
371 | 371 | if a in self.__dict__: |
|
372 | 372 | delattr(self, a) |
|
373 | 373 | self.applieddirty = False |
|
374 | 374 | self.seriesdirty = False |
|
375 | 375 | self.guardsdirty = False |
|
376 | 376 | self.activeguards = None |
|
377 | 377 | |
|
378 | 378 | def diffopts(self, opts={}, patchfn=None): |
|
379 | 379 | diffopts = patchmod.diffopts(self.ui, opts) |
|
380 | 380 | if self.gitmode == 'auto': |
|
381 | 381 | diffopts.upgrade = True |
|
382 | 382 | elif self.gitmode == 'keep': |
|
383 | 383 | pass |
|
384 | 384 | elif self.gitmode in ('yes', 'no'): |
|
385 | 385 | diffopts.git = self.gitmode == 'yes' |
|
386 | 386 | else: |
|
387 | 387 | raise util.Abort(_('mq.git option can be auto/keep/yes/no,' |
|
388 | 388 | ' got %s') % self.gitmode) |
|
389 | 389 | if patchfn: |
|
390 | 390 | diffopts = self.patchopts(diffopts, patchfn) |
|
391 | 391 | return diffopts |
|
392 | 392 | |
|
393 | 393 | def patchopts(self, diffopts, *patches): |
|
394 | 394 | """Return a copy of input diff options with git set to true if |
|
395 | 395 | referenced patch is a git patch and should be preserved as such. |
|
396 | 396 | """ |
|
397 | 397 | diffopts = diffopts.copy() |
|
398 | 398 | if not diffopts.git and self.gitmode == 'keep': |
|
399 | 399 | for patchfn in patches: |
|
400 | 400 | patchf = self.opener(patchfn, 'r') |
|
401 | 401 | # if the patch was a git patch, refresh it as a git patch |
|
402 | 402 | for line in patchf: |
|
403 | 403 | if line.startswith('diff --git'): |
|
404 | 404 | diffopts.git = True |
|
405 | 405 | break |
|
406 | 406 | patchf.close() |
|
407 | 407 | return diffopts |
|
408 | 408 | |
|
409 | 409 | def join(self, *p): |
|
410 | 410 | return os.path.join(self.path, *p) |
|
411 | 411 | |
|
412 | 412 | def findseries(self, patch): |
|
413 | 413 | def matchpatch(l): |
|
414 | 414 | l = l.split('#', 1)[0] |
|
415 | 415 | return l.strip() == patch |
|
416 | 416 | for index, l in enumerate(self.fullseries): |
|
417 | 417 | if matchpatch(l): |
|
418 | 418 | return index |
|
419 | 419 | return None |
|
420 | 420 | |
|
421 | 421 | guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)') |
|
422 | 422 | |
|
423 | 423 | def parseseries(self): |
|
424 | 424 | self.series = [] |
|
425 | 425 | self.seriesguards = [] |
|
426 | 426 | for l in self.fullseries: |
|
427 | 427 | h = l.find('#') |
|
428 | 428 | if h == -1: |
|
429 | 429 | patch = l |
|
430 | 430 | comment = '' |
|
431 | 431 | elif h == 0: |
|
432 | 432 | continue |
|
433 | 433 | else: |
|
434 | 434 | patch = l[:h] |
|
435 | 435 | comment = l[h:] |
|
436 | 436 | patch = patch.strip() |
|
437 | 437 | if patch: |
|
438 | 438 | if patch in self.series: |
|
439 | 439 | raise util.Abort(_('%s appears more than once in %s') % |
|
440 | 440 | (patch, self.join(self.seriespath))) |
|
441 | 441 | self.series.append(patch) |
|
442 | 442 | self.seriesguards.append(self.guard_re.findall(comment)) |
|
443 | 443 | |
|
444 | 444 | def checkguard(self, guard): |
|
445 | 445 | if not guard: |
|
446 | 446 | return _('guard cannot be an empty string') |
|
447 | 447 | bad_chars = '# \t\r\n\f' |
|
448 | 448 | first = guard[0] |
|
449 | 449 | if first in '-+': |
|
450 | 450 | return (_('guard %r starts with invalid character: %r') % |
|
451 | 451 | (guard, first)) |
|
452 | 452 | for c in bad_chars: |
|
453 | 453 | if c in guard: |
|
454 | 454 | return _('invalid character in guard %r: %r') % (guard, c) |
|
455 | 455 | |
|
456 | 456 | def setactive(self, guards): |
|
457 | 457 | for guard in guards: |
|
458 | 458 | bad = self.checkguard(guard) |
|
459 | 459 | if bad: |
|
460 | 460 | raise util.Abort(bad) |
|
461 | 461 | guards = sorted(set(guards)) |
|
462 | 462 | self.ui.debug('active guards: %s\n' % ' '.join(guards)) |
|
463 | 463 | self.activeguards = guards |
|
464 | 464 | self.guardsdirty = True |
|
465 | 465 | |
|
466 | 466 | def active(self): |
|
467 | 467 | if self.activeguards is None: |
|
468 | 468 | self.activeguards = [] |
|
469 | 469 | try: |
|
470 | 470 | guards = self.opener.read(self.guardspath).split() |
|
471 | 471 | except IOError, err: |
|
472 | 472 | if err.errno != errno.ENOENT: |
|
473 | 473 | raise |
|
474 | 474 | guards = [] |
|
475 | 475 | for i, guard in enumerate(guards): |
|
476 | 476 | bad = self.checkguard(guard) |
|
477 | 477 | if bad: |
|
478 | 478 | self.ui.warn('%s:%d: %s\n' % |
|
479 | 479 | (self.join(self.guardspath), i + 1, bad)) |
|
480 | 480 | else: |
|
481 | 481 | self.activeguards.append(guard) |
|
482 | 482 | return self.activeguards |
|
483 | 483 | |
|
484 | 484 | def setguards(self, idx, guards): |
|
485 | 485 | for g in guards: |
|
486 | 486 | if len(g) < 2: |
|
487 | 487 | raise util.Abort(_('guard %r too short') % g) |
|
488 | 488 | if g[0] not in '-+': |
|
489 | 489 | raise util.Abort(_('guard %r starts with invalid char') % g) |
|
490 | 490 | bad = self.checkguard(g[1:]) |
|
491 | 491 | if bad: |
|
492 | 492 | raise util.Abort(bad) |
|
493 | 493 | drop = self.guard_re.sub('', self.fullseries[idx]) |
|
494 | 494 | self.fullseries[idx] = drop + ''.join([' #' + g for g in guards]) |
|
495 | 495 | self.parseseries() |
|
496 | 496 | self.seriesdirty = True |
|
497 | 497 | |
|
498 | 498 | def pushable(self, idx): |
|
499 | 499 | if isinstance(idx, str): |
|
500 | 500 | idx = self.series.index(idx) |
|
501 | 501 | patchguards = self.seriesguards[idx] |
|
502 | 502 | if not patchguards: |
|
503 | 503 | return True, None |
|
504 | 504 | guards = self.active() |
|
505 | 505 | exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards] |
|
506 | 506 | if exactneg: |
|
507 | 507 | return False, repr(exactneg[0]) |
|
508 | 508 | pos = [g for g in patchguards if g[0] == '+'] |
|
509 | 509 | exactpos = [g for g in pos if g[1:] in guards] |
|
510 | 510 | if pos: |
|
511 | 511 | if exactpos: |
|
512 | 512 | return True, repr(exactpos[0]) |
|
513 | 513 | return False, ' '.join(map(repr, pos)) |
|
514 | 514 | return True, '' |
|
515 | 515 | |
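
The guard semantics in pushable are easy to misread, so a condensed sketch may help (an illustrative reimplementation returning only the boolean, not the extension's API): a matching negative guard always blocks, and once any positive guards exist, at least one of them must be active.

    def is_pushable(patchguards, active):
        # any active negative guard vetoes the patch outright
        if any(g[0] == '-' and g[1:] in active for g in patchguards):
            return False
        pos = [g for g in patchguards if g[0] == '+']
        # positive guards, when present, require at least one active match
        if pos:
            return any(g[1:] in active for g in pos)
        return True

    # is_pushable(['+stable'], active=['stable'])  -> True
    # is_pushable(['+stable'], active=[])          -> False
    # is_pushable(['-broken'], active=['broken'])  -> False
    # is_pushable([], active=['anything'])         -> True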
|
516 | 516 | def explainpushable(self, idx, all_patches=False): |
|
517 | 517 | write = all_patches and self.ui.write or self.ui.warn |
|
518 | 518 | if all_patches or self.ui.verbose: |
|
519 | 519 | if isinstance(idx, str): |
|
520 | 520 | idx = self.series.index(idx) |
|
521 | 521 | pushable, why = self.pushable(idx) |
|
522 | 522 | if all_patches and pushable: |
|
523 | 523 | if why is None: |
|
524 | 524 | write(_('allowing %s - no guards in effect\n') % |
|
525 | 525 | self.series[idx]) |
|
526 | 526 | else: |
|
527 | 527 | if not why: |
|
528 | 528 | write(_('allowing %s - no matching negative guards\n') % |
|
529 | 529 | self.series[idx]) |
|
530 | 530 | else: |
|
531 | 531 | write(_('allowing %s - guarded by %s\n') % |
|
532 | 532 | (self.series[idx], why)) |
|
533 | 533 | if not pushable: |
|
534 | 534 | if why: |
|
535 | 535 | write(_('skipping %s - guarded by %s\n') % |
|
536 | 536 | (self.series[idx], why)) |
|
537 | 537 | else: |
|
538 | 538 | write(_('skipping %s - no matching guards\n') % |
|
539 | 539 | self.series[idx]) |
|
540 | 540 | |
|
541 | 541 | def savedirty(self): |
|
542 | 542 | def writelist(items, path): |
|
543 | 543 | fp = self.opener(path, 'w') |
|
544 | 544 | for i in items: |
|
545 | 545 | fp.write("%s\n" % i) |
|
546 | 546 | fp.close() |
|
547 | 547 | if self.applieddirty: |
|
548 | 548 | writelist(map(str, self.applied), self.statuspath) |
|
549 | 549 | self.applieddirty = False |
|
550 | 550 | if self.seriesdirty: |
|
551 | 551 | writelist(self.fullseries, self.seriespath) |
|
552 | 552 | self.seriesdirty = False |
|
553 | 553 | if self.guardsdirty: |
|
554 | 554 | writelist(self.activeguards, self.guardspath) |
|
555 | 555 | self.guardsdirty = False |
|
556 | 556 | if self.added: |
|
557 | 557 | qrepo = self.qrepo() |
|
558 | 558 | if qrepo: |
|
559 | 559 | qrepo[None].add(f for f in self.added if f not in qrepo[None]) |
|
560 | 560 | self.added = [] |
|
561 | 561 | |
|
562 | 562 | def removeundo(self, repo): |
|
563 | 563 | undo = repo.sjoin('undo') |
|
564 | 564 | if not os.path.exists(undo): |
|
565 | 565 | return |
|
566 | 566 | try: |
|
567 | 567 | os.unlink(undo) |
|
568 | 568 | except OSError, inst: |
|
569 | 569 | self.ui.warn(_('error removing undo: %s\n') % str(inst)) |
|
570 | 570 | |
|
571 | 571 | def backup(self, repo, files, copy=False): |
|
572 | 572 | # backup local changes in --force case |
|
573 | 573 | for f in sorted(files): |
|
574 | 574 | absf = repo.wjoin(f) |
|
575 | 575 | if os.path.lexists(absf): |
|
576 | 576 | self.ui.note(_('saving current version of %s as %s\n') % |
|
577 | 577 | (f, f + '.orig')) |
|
578 | 578 | if copy: |
|
579 | 579 | util.copyfile(absf, absf + '.orig') |
|
580 | 580 | else: |
|
581 | 581 | util.rename(absf, absf + '.orig') |
|
582 | 582 | |
|
583 | 583 | def printdiff(self, repo, diffopts, node1, node2=None, files=None, |
|
584 | 584 | fp=None, changes=None, opts={}): |
|
585 | 585 | stat = opts.get('stat') |
|
586 | 586 | m = scmutil.match(repo[node1], files, opts) |
|
587 | 587 | cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m, |
|
588 | 588 | changes, stat, fp) |
|
589 | 589 | |
|
590 | 590 | def mergeone(self, repo, mergeq, head, patch, rev, diffopts): |
|
591 | 591 | # first try just applying the patch |
|
592 | 592 | (err, n) = self.apply(repo, [patch], update_status=False, |
|
593 | 593 | strict=True, merge=rev) |
|
594 | 594 | |
|
595 | 595 | if err == 0: |
|
596 | 596 | return (err, n) |
|
597 | 597 | |
|
598 | 598 | if n is None: |
|
599 | 599 | raise util.Abort(_("apply failed for patch %s") % patch) |
|
600 | 600 | |
|
601 | 601 | self.ui.warn(_("patch didn't work out, merging %s\n") % patch) |
|
602 | 602 | |
|
603 | 603 | # apply failed, strip away that rev and merge. |
|
604 | 604 | hg.clean(repo, head) |
|
605 | 605 | self.strip(repo, [n], update=False, backup='strip') |
|
606 | 606 | |
|
607 | 607 | ctx = repo[rev] |
|
608 | 608 | ret = hg.merge(repo, rev) |
|
609 | 609 | if ret: |
|
610 | 610 | raise util.Abort(_("update returned %d") % ret) |
|
611 | 611 | n = newcommit(repo, None, ctx.description(), ctx.user(), force=True) |
|
612 | 612 | if n is None: |
|
613 | 613 | raise util.Abort(_("repo commit failed")) |
|
614 | 614 | try: |
|
615 | 615 | ph = patchheader(mergeq.join(patch), self.plainmode) |
|
616 | 616 | except Exception: |
|
617 | 617 | raise util.Abort(_("unable to read %s") % patch) |
|
618 | 618 | |
|
619 | 619 | diffopts = self.patchopts(diffopts, patch) |
|
620 | 620 | patchf = self.opener(patch, "w") |
|
621 | 621 | comments = str(ph) |
|
622 | 622 | if comments: |
|
623 | 623 | patchf.write(comments) |
|
624 | 624 | self.printdiff(repo, diffopts, head, n, fp=patchf) |
|
625 | 625 | patchf.close() |
|
626 | 626 | self.removeundo(repo) |
|
627 | 627 | return (0, n) |
|
628 | 628 | |
|
629 | 629 | def qparents(self, repo, rev=None): |
|
630 | 630 | if rev is None: |
|
631 | 631 | (p1, p2) = repo.dirstate.parents() |
|
632 | 632 | if p2 == nullid: |
|
633 | 633 | return p1 |
|
634 | 634 | if not self.applied: |
|
635 | 635 | return None |
|
636 | 636 | return self.applied[-1].node |
|
637 | 637 | p1, p2 = repo.changelog.parents(rev) |
|
638 | 638 | if p2 != nullid and p2 in [x.node for x in self.applied]: |
|
639 | 639 | return p2 |
|
640 | 640 | return p1 |
|
641 | 641 | |
|
642 | 642 | def mergepatch(self, repo, mergeq, series, diffopts): |
|
643 | 643 | if not self.applied: |
|
644 | 644 | # each of the patches merged in will have two parents. This |
|
645 | 645 | # can confuse the qrefresh, qdiff, and strip code because it |
|
646 | 646 | # needs to know which parent is actually in the patch queue. |
|
647 | 647 | # So, we insert a merge marker with only one parent. This way |
|
648 | 648 | # the first patch in the queue is never a merge patch |
|
649 | 649 | # |
|
650 | 650 | pname = ".hg.patches.merge.marker" |
|
651 | 651 | n = newcommit(repo, None, '[mq]: merge marker', force=True) |
|
652 | 652 | self.removeundo(repo) |
|
653 | 653 | self.applied.append(statusentry(n, pname)) |
|
654 | 654 | self.applieddirty = True |
|
655 | 655 | |
|
656 | 656 | head = self.qparents(repo) |
|
657 | 657 | |
|
658 | 658 | for patch in series: |
|
659 | 659 | patch = mergeq.lookup(patch, strict=True) |
|
660 | 660 | if not patch: |
|
661 | 661 | self.ui.warn(_("patch %s does not exist\n") % patch) |
|
662 | 662 | return (1, None) |
|
663 | 663 | pushable, reason = self.pushable(patch) |
|
664 | 664 | if not pushable: |
|
665 | 665 | self.explainpushable(patch, all_patches=True) |
|
666 | 666 | continue |
|
667 | 667 | info = mergeq.isapplied(patch) |
|
668 | 668 | if not info: |
|
669 | 669 | self.ui.warn(_("patch %s is not applied\n") % patch) |
|
670 | 670 | return (1, None) |
|
671 | 671 | rev = info[1] |
|
672 | 672 | err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts) |
|
673 | 673 | if head: |
|
674 | 674 | self.applied.append(statusentry(head, patch)) |
|
675 | 675 | self.applieddirty = True |
|
676 | 676 | if err: |
|
677 | 677 | return (err, head) |
|
678 | 678 | self.savedirty() |
|
679 | 679 | return (0, head) |
|
680 | 680 | |
|
681 | 681 | def patch(self, repo, patchfile): |
|
682 | 682 | '''Apply patchfile to the working directory. |
|
683 | 683 | patchfile: name of patch file''' |
|
684 | 684 | files = set() |
|
685 | 685 | try: |
|
686 | 686 | fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1, |
|
687 | 687 | files=files, eolmode=None) |
|
688 | 688 | return (True, list(files), fuzz) |
|
689 | 689 | except Exception, inst: |
|
690 | 690 | self.ui.note(str(inst) + '\n') |
|
691 | 691 | if not self.ui.verbose: |
|
692 | 692 | self.ui.warn(_("patch failed, unable to continue (try -v)\n")) |
|
693 | 693 | self.ui.traceback() |
|
694 | 694 | return (False, list(files), False) |
|
695 | 695 | |
|
696 | 696 | def apply(self, repo, series, list=False, update_status=True, |
|
697 | 697 | strict=False, patchdir=None, merge=None, all_files=None, |
|
698 | 698 | tobackup=None, check=False): |
|
699 | 699 | wlock = lock = tr = None |
|
700 | 700 | try: |
|
701 | 701 | wlock = repo.wlock() |
|
702 | 702 | lock = repo.lock() |
|
703 | 703 | tr = repo.transaction("qpush") |
|
704 | 704 | try: |
|
705 | 705 | ret = self._apply(repo, series, list, update_status, |
|
706 | 706 | strict, patchdir, merge, all_files=all_files, |
|
707 | 707 | tobackup=tobackup, check=check) |
|
708 | 708 | tr.close() |
|
709 | 709 | self.savedirty() |
|
710 | 710 | return ret |
|
711 | 711 | except AbortNoCleanup: |
|
712 | 712 | tr.close() |
|
713 | 713 | self.savedirty() |
|
714 | 714 | return 2, repo.dirstate.p1() |
|
715 | 715 | except: |
|
716 | 716 | try: |
|
717 | 717 | tr.abort() |
|
718 | 718 | finally: |
|
719 | 719 | repo.invalidate() |
|
720 | 720 | repo.dirstate.invalidate() |
|
721 | 721 | self.invalidate() |
|
722 | 722 | raise |
|
723 | 723 | finally: |
|
724 | 724 | release(tr, lock, wlock) |
|
725 | 725 | self.removeundo(repo) |
|
726 | 726 | |
|
727 | 727 | def _apply(self, repo, series, list=False, update_status=True, |
|
728 | 728 | strict=False, patchdir=None, merge=None, all_files=None, |
|
729 | 729 | tobackup=None, check=False): |
|
730 | 730 | """returns (error, hash) |
|
731 | 731 | |
|
732 | 732 | error = 1 for unable to read, 2 for patch failed, 3 for patch |
|
733 | 733 | fuzz. tobackup is None or a set of files to back up before they |
|
734 | 734 | are modified by a patch. |
|
735 | 735 | """ |
|
736 | 736 | # TODO unify with commands.py |
|
737 | 737 | if not patchdir: |
|
738 | 738 | patchdir = self.path |
|
739 | 739 | err = 0 |
|
740 | 740 | n = None |
|
741 | 741 | for patchname in series: |
|
742 | 742 | pushable, reason = self.pushable(patchname) |
|
743 | 743 | if not pushable: |
|
744 | 744 | self.explainpushable(patchname, all_patches=True) |
|
745 | 745 | continue |
|
746 | 746 | self.ui.status(_("applying %s\n") % patchname) |
|
747 | 747 | pf = os.path.join(patchdir, patchname) |
|
748 | 748 | |
|
749 | 749 | try: |
|
750 | 750 | ph = patchheader(self.join(patchname), self.plainmode) |
|
751 | 751 | except IOError: |
|
752 | 752 | self.ui.warn(_("unable to read %s\n") % patchname) |
|
753 | 753 | err = 1 |
|
754 | 754 | break |
|
755 | 755 | |
|
756 | 756 | message = ph.message |
|
757 | 757 | if not message: |
|
758 | 758 | # The commit message should not be translated |
|
759 | 759 | message = "imported patch %s\n" % patchname |
|
760 | 760 | else: |
|
761 | 761 | if list: |
|
762 | 762 | # The commit message should not be translated |
|
763 | 763 | message.append("\nimported patch %s" % patchname) |
|
764 | 764 | message = '\n'.join(message) |
|
765 | 765 | |
|
766 | 766 | if ph.haspatch: |
|
767 | 767 | if tobackup: |
|
768 | 768 | touched = patchmod.changedfiles(self.ui, repo, pf) |
|
769 | 769 | touched = set(touched) & tobackup |
|
770 | 770 | if touched and check: |
|
771 | 771 | raise AbortNoCleanup( |
|
772 | 772 | _("local changes found, refresh first")) |
|
773 | 773 | self.backup(repo, touched, copy=True) |
|
774 | 774 | tobackup = tobackup - touched |
|
775 | 775 | (patcherr, files, fuzz) = self.patch(repo, pf) |
|
776 | 776 | if all_files is not None: |
|
777 | 777 | all_files.update(files) |
|
778 | 778 | patcherr = not patcherr |
|
779 | 779 | else: |
|
780 | 780 | self.ui.warn(_("patch %s is empty\n") % patchname) |
|
781 | 781 | patcherr, files, fuzz = 0, [], 0 |
|
782 | 782 | |
|
783 | 783 | if merge and files: |
|
784 | 784 | # Mark as removed/merged and update dirstate parent info |
|
785 | 785 | removed = [] |
|
786 | 786 | merged = [] |
|
787 | 787 | for f in files: |
|
788 | 788 | if os.path.lexists(repo.wjoin(f)): |
|
789 | 789 | merged.append(f) |
|
790 | 790 | else: |
|
791 | 791 | removed.append(f) |
|
792 | 792 | for f in removed: |
|
793 | 793 | repo.dirstate.remove(f) |
|
794 | 794 | for f in merged: |
|
795 | 795 | repo.dirstate.merge(f) |
|
796 | 796 | p1, p2 = repo.dirstate.parents() |
|
797 | 797 | repo.setparents(p1, merge) |
|
798 | 798 | |
|
799 | 799 | match = scmutil.matchfiles(repo, files or []) |
|
800 | 800 | oldtip = repo['tip'] |
|
801 | 801 | n = newcommit(repo, None, message, ph.user, ph.date, match=match, |
|
802 | 802 | force=True) |
|
803 | 803 | if repo['tip'] == oldtip: |
|
804 | 804 | raise util.Abort(_("qpush exactly duplicates child changeset")) |
|
805 | 805 | if n is None: |
|
806 | 806 | raise util.Abort(_("repository commit failed")) |
|
807 | 807 | |
|
808 | 808 | if update_status: |
|
809 | 809 | self.applied.append(statusentry(n, patchname)) |
|
810 | 810 | |
|
811 | 811 | if patcherr: |
|
812 | 812 | self.ui.warn(_("patch failed, rejects left in working dir\n")) |
|
813 | 813 | err = 2 |
|
814 | 814 | break |
|
815 | 815 | |
|
816 | 816 | if fuzz and strict: |
|
817 | 817 | self.ui.warn(_("fuzz found when applying patch, stopping\n")) |
|
818 | 818 | err = 3 |
|
819 | 819 | break |
|
820 | 820 | return (err, n) |
|
821 | 821 | |
|
822 | 822 | def _cleanup(self, patches, numrevs, keep=False): |
|
823 | 823 | if not keep: |
|
824 | 824 | r = self.qrepo() |
|
825 | 825 | if r: |
|
826 | 826 | r[None].forget(patches) |
|
827 | 827 | for p in patches: |
|
828 | 828 | os.unlink(self.join(p)) |
|
829 | 829 | |
|
830 | 830 | qfinished = [] |
|
831 | 831 | if numrevs: |
|
832 | 832 | qfinished = self.applied[:numrevs] |
|
833 | 833 | del self.applied[:numrevs] |
|
834 | 834 | self.applieddirty = True |
|
835 | 835 | |
|
836 | 836 | unknown = [] |
|
837 | 837 | |
|
838 | 838 | for (i, p) in sorted([(self.findseries(p), p) for p in patches], |
|
839 | 839 | reverse=True): |
|
840 | 840 | if i is not None: |
|
841 | 841 | del self.fullseries[i] |
|
842 | 842 | else: |
|
843 | 843 | unknown.append(p) |
|
844 | 844 | |
|
845 | 845 | if unknown: |
|
846 | 846 | if numrevs: |
|
847 | 847 | rev = dict((entry.name, entry.node) for entry in qfinished) |
|
848 | 848 | for p in unknown: |
|
849 | 849 | msg = _('revision %s refers to unknown patches: %s\n') |
|
850 | 850 | self.ui.warn(msg % (short(rev[p]), p)) |
|
851 | 851 | else: |
|
852 | 852 | msg = _('unknown patches: %s\n') |
|
853 | 853 | raise util.Abort(''.join(msg % p for p in unknown)) |
|
854 | 854 | |
|
855 | 855 | self.parseseries() |
|
856 | 856 | self.seriesdirty = True |
|
857 | 857 | return [entry.node for entry in qfinished] |
|
858 | 858 | |
|
859 | 859 | def _revpatches(self, repo, revs): |
|
860 | 860 | firstrev = repo[self.applied[0].node].rev() |
|
861 | 861 | patches = [] |
|
862 | 862 | for i, rev in enumerate(revs): |
|
863 | 863 | |
|
864 | 864 | if rev < firstrev: |
|
865 | 865 | raise util.Abort(_('revision %d is not managed') % rev) |
|
866 | 866 | |
|
867 | 867 | ctx = repo[rev] |
|
868 | 868 | base = self.applied[i].node |
|
869 | 869 | if ctx.node() != base: |
|
870 | 870 | msg = _('cannot delete revision %d above applied patches') |
|
871 | 871 | raise util.Abort(msg % rev) |
|
872 | 872 | |
|
873 | 873 | patch = self.applied[i].name |
|
874 | 874 | for fmt in ('[mq]: %s', 'imported patch %s'): |
|
875 | 875 | if ctx.description() == fmt % patch: |
|
876 | 876 | msg = _('patch %s finalized without changeset message\n') |
|
877 | 877 | repo.ui.status(msg % patch) |
|
878 | 878 | break |
|
879 | 879 | |
|
880 | 880 | patches.append(patch) |
|
881 | 881 | return patches |
|
882 | 882 | |
|
883 | 883 | def finish(self, repo, revs): |
|
884 | 884 | # Manually trigger phase computation to ensure phasedefaults is |
|
885 | 885 | # executed before we remove the patches. |
|
886 | 886 | repo._phasecache |
|
887 | 887 | patches = self._revpatches(repo, sorted(revs)) |
|
888 | 888 | qfinished = self._cleanup(patches, len(patches)) |
|
889 | 889 | if qfinished and repo.ui.configbool('mq', 'secret', False): |
|
890 | 890 | # only use this logic when the secret option is added |
|
891 | 891 | oldqbase = repo[qfinished[0]] |
|
892 | 892 | tphase = repo.ui.config('phases', 'new-commit', phases.draft) |
|
893 | 893 | if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase: |
|
894 | 894 | phases.advanceboundary(repo, tphase, qfinished) |
|
895 | 895 | |
|
896 | 896 | def delete(self, repo, patches, opts): |
|
897 | 897 | if not patches and not opts.get('rev'): |
|
898 | 898 | raise util.Abort(_('qdelete requires at least one revision or ' |
|
899 | 899 | 'patch name')) |
|
900 | 900 | |
|
901 | 901 | realpatches = [] |
|
902 | 902 | for patch in patches: |
|
903 | 903 | patch = self.lookup(patch, strict=True) |
|
904 | 904 | info = self.isapplied(patch) |
|
905 | 905 | if info: |
|
906 | 906 | raise util.Abort(_("cannot delete applied patch %s") % patch) |
|
907 | 907 | if patch not in self.series: |
|
908 | 908 | raise util.Abort(_("patch %s not in series file") % patch) |
|
909 | 909 | if patch not in realpatches: |
|
910 | 910 | realpatches.append(patch) |
|
911 | 911 | |
|
912 | 912 | numrevs = 0 |
|
913 | 913 | if opts.get('rev'): |
|
914 | 914 | if not self.applied: |
|
915 | 915 | raise util.Abort(_('no patches applied')) |
|
916 | 916 | revs = scmutil.revrange(repo, opts.get('rev')) |
|
917 | 917 | if len(revs) > 1 and revs[0] > revs[1]: |
|
918 | 918 | revs.reverse() |
|
919 | 919 | revpatches = self._revpatches(repo, revs) |
|
920 | 920 | realpatches += revpatches |
|
921 | 921 | numrevs = len(revpatches) |
|
922 | 922 | |
|
923 | 923 | self._cleanup(realpatches, numrevs, opts.get('keep')) |
|
924 | 924 | |
|
925 | 925 | def checktoppatch(self, repo): |
|
926 | 926 | if self.applied: |
|
927 | 927 | top = self.applied[-1].node |
|
928 | 928 | patch = self.applied[-1].name |
|
929 | 929 | pp = repo.dirstate.parents() |
|
930 | 930 | if top not in pp: |
|
931 | 931 | raise util.Abort(_("working directory revision is not qtip")) |
|
932 | 932 | return top, patch |
|
933 | 933 | return None, None |
|
934 | 934 | |
|
935 | 935 | def checksubstate(self, repo): |
|
936 | 936 | '''return list of subrepos at a different revision than substate. |
|
937 | 937 | Abort if any subrepos have uncommitted changes.''' |
|
938 | 938 | inclsubs = [] |
|
939 | 939 | wctx = repo[None] |
|
940 | 940 | for s in wctx.substate: |
|
941 | 941 | if wctx.sub(s).dirty(True): |
|
942 | 942 | raise util.Abort( |
|
943 | 943 | _("uncommitted changes in subrepository %s") % s) |
|
944 | 944 | elif wctx.sub(s).dirty(): |
|
945 | 945 | inclsubs.append(s) |
|
946 | 946 | return inclsubs |
|
947 | 947 | |
|
948 | 948 | def localchangesfound(self, refresh=True): |
|
949 | 949 | if refresh: |
|
950 | 950 | raise util.Abort(_("local changes found, refresh first")) |
|
951 | 951 | else: |
|
952 | 952 | raise util.Abort(_("local changes found")) |
|
953 | 953 | |
|
954 | 954 | def checklocalchanges(self, repo, force=False, refresh=True): |
|
955 | 955 | m, a, r, d = repo.status()[:4] |
|
956 | 956 | if (m or a or r or d) and not force: |
|
957 | 957 | self.localchangesfound(refresh) |
|
958 | 958 | return m, a, r, d |
|
959 | 959 | |
|
960 | 960 | _reserved = ('series', 'status', 'guards', '.', '..') |
|
961 | 961 | def checkreservedname(self, name): |
|
962 | 962 | if name in self._reserved: |
|
963 | 963 | raise util.Abort(_('"%s" cannot be used as the name of a patch') |
|
964 | 964 | % name) |
|
965 | 965 | for prefix in ('.hg', '.mq'): |
|
966 | 966 | if name.startswith(prefix): |
|
967 | 967 | raise util.Abort(_('patch name cannot begin with "%s"') |
|
968 | 968 | % prefix) |
|
969 | 969 | for c in ('#', ':'): |
|
970 | 970 | if c in name: |
|
971 | 971 | raise util.Abort(_('"%s" cannot be used in the name of a patch') |
|
972 | 972 | % c) |
|
973 | 973 | |
|
974 | 974 | def checkpatchname(self, name, force=False): |
|
975 | 975 | self.checkreservedname(name) |
|
976 | 976 | if not force and os.path.exists(self.join(name)): |
|
977 | 977 | if os.path.isdir(self.join(name)): |
|
978 | 978 | raise util.Abort(_('"%s" already exists as a directory') |
|
979 | 979 | % name) |
|
980 | 980 | else: |
|
981 | 981 | raise util.Abort(_('patch "%s" already exists') % name) |
|
982 | 982 | |
|
983 | 983 | def checkforcecheck(self, check, force): |
|
984 | 984 | if force and check: |
|
985 | 985 | raise util.Abort(_('cannot use both --force and --check')) |
|
986 | 986 | |
|
987 | 987 | def new(self, repo, patchfn, *pats, **opts): |
|
988 | 988 | """options: |
|
989 | 989 | msg: a string or a no-argument function returning a string |
|
990 | 990 | """ |
|
991 | 991 | msg = opts.get('msg') |
|
992 | 992 | user = opts.get('user') |
|
993 | 993 | date = opts.get('date') |
|
994 | 994 | if date: |
|
995 | 995 | date = util.parsedate(date) |
|
996 | 996 | diffopts = self.diffopts({'git': opts.get('git')}) |
|
997 | 997 | if opts.get('checkname', True): |
|
998 | 998 | self.checkpatchname(patchfn) |
|
999 | 999 | inclsubs = self.checksubstate(repo) |
|
1000 | 1000 | if inclsubs: |
|
1001 | 1001 | inclsubs.append('.hgsubstate') |
|
1002 | 1002 | substatestate = repo.dirstate['.hgsubstate'] |
|
1003 | 1003 | if opts.get('include') or opts.get('exclude') or pats: |
|
1004 | 1004 | if inclsubs: |
|
1005 | 1005 | pats = list(pats or []) + inclsubs |
|
1006 | 1006 | match = scmutil.match(repo[None], pats, opts) |
|
1007 | 1007 | # detect missing files in pats |
|
1008 | 1008 | def badfn(f, msg): |
|
1009 | 1009 | if f != '.hgsubstate': # .hgsubstate is auto-created |
|
1010 | 1010 | raise util.Abort('%s: %s' % (f, msg)) |
|
1011 | 1011 | match.bad = badfn |
|
1012 | 1012 | changes = repo.status(match=match) |
|
1013 | 1013 | m, a, r, d = changes[:4] |
|
1014 | 1014 | else: |
|
1015 | 1015 | changes = self.checklocalchanges(repo, force=True) |
|
1016 | 1016 | m, a, r, d = changes |
|
1017 | 1017 | match = scmutil.matchfiles(repo, m + a + r + inclsubs) |
|
1018 | 1018 | if len(repo[None].parents()) > 1: |
|
1019 | 1019 | raise util.Abort(_('cannot manage merge changesets')) |
|
1020 | 1020 | commitfiles = m + a + r |
|
1021 | 1021 | self.checktoppatch(repo) |
|
1022 | 1022 | insert = self.fullseriesend() |
|
1023 | 1023 | wlock = repo.wlock() |
|
1024 | 1024 | try: |
|
1025 | 1025 | try: |
|
1026 | 1026 | # if patch file write fails, abort early |
|
1027 | 1027 | p = self.opener(patchfn, "w") |
|
1028 | 1028 | except IOError, e: |
|
1029 | 1029 | raise util.Abort(_('cannot write patch "%s": %s') |
|
1030 | 1030 | % (patchfn, e.strerror)) |
|
1031 | 1031 | try: |
|
1032 | 1032 | if self.plainmode: |
|
1033 | 1033 | if user: |
|
1034 | 1034 | p.write("From: " + user + "\n") |
|
1035 | 1035 | if not date: |
|
1036 | 1036 | p.write("\n") |
|
1037 | 1037 | if date: |
|
1038 | 1038 | p.write("Date: %d %d\n\n" % date) |
|
1039 | 1039 | else: |
|
1040 | 1040 | p.write("# HG changeset patch\n") |
|
1041 | 1041 | p.write("# Parent " |
|
1042 | 1042 | + hex(repo[None].p1().node()) + "\n") |
|
1043 | 1043 | if user: |
|
1044 | 1044 | p.write("# User " + user + "\n") |
|
1045 | 1045 | if date: |
|
1046 | 1046 | p.write("# Date %s %s\n\n" % date) |
|
1047 | 1047 | if util.safehasattr(msg, '__call__'): |
|
1048 | 1048 | msg = msg() |
|
1049 | 1049 | commitmsg = msg and msg or ("[mq]: %s" % patchfn) |
|
1050 | 1050 | n = newcommit(repo, None, commitmsg, user, date, match=match, |
|
1051 | 1051 | force=True) |
|
1052 | 1052 | if n is None: |
|
1053 | 1053 | raise util.Abort(_("repo commit failed")) |
|
1054 | 1054 | try: |
|
1055 | 1055 | self.fullseries[insert:insert] = [patchfn] |
|
1056 | 1056 | self.applied.append(statusentry(n, patchfn)) |
|
1057 | 1057 | self.parseseries() |
|
1058 | 1058 | self.seriesdirty = True |
|
1059 | 1059 | self.applieddirty = True |
|
1060 | 1060 | if msg: |
|
1061 | 1061 | msg = msg + "\n\n" |
|
1062 | 1062 | p.write(msg) |
|
1063 | 1063 | if commitfiles: |
|
1064 | 1064 | parent = self.qparents(repo, n) |
|
1065 | 1065 | if inclsubs: |
|
1066 | 1066 | if substatestate in 'a?': |
|
1067 | 1067 | changes[1].append('.hgsubstate') |
|
1068 | 1068 | elif substatestate in 'r': |
|
1069 | 1069 | changes[2].append('.hgsubstate') |
|
1070 | 1070 | else: # modified |
|
1071 | 1071 | changes[0].append('.hgsubstate') |
|
1072 | 1072 | chunks = patchmod.diff(repo, node1=parent, node2=n, |
|
1073 | 1073 | changes=changes, opts=diffopts) |
|
1074 | 1074 | for chunk in chunks: |
|
1075 | 1075 | p.write(chunk) |
|
1076 | 1076 | p.close() |
|
1077 | 1077 | r = self.qrepo() |
|
1078 | 1078 | if r: |
|
1079 | 1079 | r[None].add([patchfn]) |
|
1080 | 1080 | except: |
|
1081 | 1081 | repo.rollback() |
|
1082 | 1082 | raise |
|
1083 | 1083 | except Exception: |
|
1084 | 1084 | patchpath = self.join(patchfn) |
|
1085 | 1085 | try: |
|
1086 | 1086 | os.unlink(patchpath) |
|
1087 | 1087 | except: |
|
1088 | 1088 | self.ui.warn(_('error unlinking %s\n') % patchpath) |
|
1089 | 1089 | raise |
|
1090 | 1090 | self.removeundo(repo) |
|
1091 | 1091 | finally: |
|
1092 | 1092 | release(wlock) |
|
1093 | 1093 | |
|
1094 | 1094 | def strip(self, repo, revs, update=True, backup="all", force=None): |
|
1095 | 1095 | wlock = lock = None |
|
1096 | 1096 | try: |
|
1097 | 1097 | wlock = repo.wlock() |
|
1098 | 1098 | lock = repo.lock() |
|
1099 | 1099 | |
|
1100 | 1100 | if update: |
|
1101 | 1101 | self.checklocalchanges(repo, force=force, refresh=False) |
|
1102 | 1102 | urev = self.qparents(repo, revs[0]) |
|
1103 | 1103 | hg.clean(repo, urev) |
|
1104 | 1104 | repo.dirstate.write() |
|
1105 | 1105 | |
|
1106 | 1106 | repair.strip(self.ui, repo, revs, backup) |
|
1107 | 1107 | finally: |
|
1108 | 1108 | release(lock, wlock) |
|
1109 | 1109 | |
|
1110 | 1110 | def isapplied(self, patch): |
|
1111 | 1111 | """returns (index, rev, patch)""" |
|
1112 | 1112 | for i, a in enumerate(self.applied): |
|
1113 | 1113 | if a.name == patch: |
|
1114 | 1114 | return (i, a.node, a.name) |
|
1115 | 1115 | return None |
|
1116 | 1116 | |
|
1117 | 1117 | # if the exact patch name does not exist, we try a few |
|
1118 | 1118 | # variations. If strict is passed, we try only #1 |
|
1119 | 1119 | # |
|
1120 | 1120 | # 1) a number (as string) to indicate an offset in the series file |
|
1121 | 1121 | # 2) a unique substring of the patch name |
|
1122 | 1122 | # 3) patchname[-+]num to indicate an offset in the series file |
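|
| | # For example, assuming a hypothetical series ['foo.diff', |
|
| | # 'bar.diff', 'baz.diff'] with unambiguous names: |
|
| | #   lookup('1')     -> 'bar.diff'  (index into the series) |
|
| | #   lookup('foo')   -> 'foo.diff'  (unique substring) |
|
| | #   lookup('baz-1') -> 'bar.diff'  (one entry before 'baz') |
|
| | #   lookup('foo+2') -> 'baz.diff'  (two entries after 'foo') |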
|
1123 | 1123 | def lookup(self, patch, strict=False): |
|
1124 | 1124 | def partialname(s): |
|
1125 | 1125 | if s in self.series: |
|
1126 | 1126 | return s |
|
1127 | 1127 | matches = [x for x in self.series if s in x] |
|
1128 | 1128 | if len(matches) > 1: |
|
1129 | 1129 | self.ui.warn(_('patch name "%s" is ambiguous:\n') % s) |
|
1130 | 1130 | for m in matches: |
|
1131 | 1131 | self.ui.warn(' %s\n' % m) |
|
1132 | 1132 | return None |
|
1133 | 1133 | if matches: |
|
1134 | 1134 | return matches[0] |
|
1135 | 1135 | if self.series and self.applied: |
|
1136 | 1136 | if s == 'qtip': |
|
1137 | 1137 | return self.series[self.seriesend(True)-1] |
|
1138 | 1138 | if s == 'qbase': |
|
1139 | 1139 | return self.series[0] |
|
1140 | 1140 | return None |
|
1141 | 1141 | |
|
1142 | 1142 | if patch in self.series: |
|
1143 | 1143 | return patch |
|
1144 | 1144 | |
|
1145 | 1145 | if not os.path.isfile(self.join(patch)): |
|
1146 | 1146 | try: |
|
1147 | 1147 | sno = int(patch) |
|
1148 | 1148 | except (ValueError, OverflowError): |
|
1149 | 1149 | pass |
|
1150 | 1150 | else: |
|
1151 | 1151 | if -len(self.series) <= sno < len(self.series): |
|
1152 | 1152 | return self.series[sno] |
|
1153 | 1153 | |
|
1154 | 1154 | if not strict: |
|
1155 | 1155 | res = partialname(patch) |
|
1156 | 1156 | if res: |
|
1157 | 1157 | return res |
|
1158 | 1158 | minus = patch.rfind('-') |
|
1159 | 1159 | if minus >= 0: |
|
1160 | 1160 | res = partialname(patch[:minus]) |
|
1161 | 1161 | if res: |
|
1162 | 1162 | i = self.series.index(res) |
|
1163 | 1163 | try: |
|
1164 | 1164 | off = int(patch[minus + 1:] or 1) |
|
1165 | 1165 | except (ValueError, OverflowError): |
|
1166 | 1166 | pass |
|
1167 | 1167 | else: |
|
1168 | 1168 | if i - off >= 0: |
|
1169 | 1169 | return self.series[i - off] |
|
1170 | 1170 | plus = patch.rfind('+') |
|
1171 | 1171 | if plus >= 0: |
|
1172 | 1172 | res = partialname(patch[:plus]) |
|
1173 | 1173 | if res: |
|
1174 | 1174 | i = self.series.index(res) |
|
1175 | 1175 | try: |
|
1176 | 1176 | off = int(patch[plus + 1:] or 1) |
|
1177 | 1177 | except (ValueError, OverflowError): |
|
1178 | 1178 | pass |
|
1179 | 1179 | else: |
|
1180 | 1180 | if i + off < len(self.series): |
|
1181 | 1181 | return self.series[i + off] |
|
1182 | 1182 | raise util.Abort(_("patch %s not in series") % patch) |
|
1183 | 1183 | |
|
1184 | 1184 | def push(self, repo, patch=None, force=False, list=False, mergeq=None, |
|
1185 | 1185 | all=False, move=False, exact=False, nobackup=False, check=False): |
|
1186 | 1186 | self.checkforcecheck(check, force) |
|
1187 | 1187 | diffopts = self.diffopts() |
|
1188 | 1188 | wlock = repo.wlock() |
|
1189 | 1189 | try: |
|
1190 | 1190 | heads = [] |
|
1191 | 1191 | for b, ls in repo.branchmap().iteritems(): |
|
1192 | 1192 | heads += ls |
|
1193 | 1193 | if not heads: |
|
1194 | 1194 | heads = [nullid] |
|
1195 | 1195 | if repo.dirstate.p1() not in heads and not exact: |
|
1196 | 1196 | self.ui.status(_("(working directory not at a head)\n")) |
|
1197 | 1197 | |
|
1198 | 1198 | if not self.series: |
|
1199 | 1199 | self.ui.warn(_('no patches in series\n')) |
|
1200 | 1200 | return 0 |
|
1201 | 1201 | |
|
1202 | 1202 | # Suppose our series file is: A B C and the current 'top' |
|
1203 | 1203 | # patch is B. qpush C should be performed (moving forward); |
|
1204 | 1204 | # qpush B is a NOP (no change); qpush A is an error (can't |
|
1205 | 1205 | # go backwards with qpush). |
|
1206 | 1206 | if patch: |
|
1207 | 1207 | patch = self.lookup(patch) |
|
1208 | 1208 | info = self.isapplied(patch) |
|
1209 | 1209 | if info and info[0] >= len(self.applied) - 1: |
|
1210 | 1210 | self.ui.warn( |
|
1211 | 1211 | _('qpush: %s is already at the top\n') % patch) |
|
1212 | 1212 | return 0 |
|
1213 | 1213 | |
|
1214 | 1214 | pushable, reason = self.pushable(patch) |
|
1215 | 1215 | if pushable: |
|
1216 | 1216 | if self.series.index(patch) < self.seriesend(): |
|
1217 | 1217 | raise util.Abort( |
|
1218 | 1218 | _("cannot push to a previous patch: %s") % patch) |
|
1219 | 1219 | else: |
|
1220 | 1220 | if reason: |
|
1221 | 1221 | reason = _('guarded by %s') % reason |
|
1222 | 1222 | else: |
|
1223 | 1223 | reason = _('no matching guards') |
|
1224 | 1224 | self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason)) |
|
1225 | 1225 | return 1 |
|
1226 | 1226 | elif all: |
|
1227 | 1227 | patch = self.series[-1] |
|
1228 | 1228 | if self.isapplied(patch): |
|
1229 | 1229 | self.ui.warn(_('all patches are currently applied\n')) |
|
1230 | 1230 | return 0 |
|
1231 | 1231 | |
|
1232 | 1232 | # Following the above example, starting at 'top' of B: |
|
1233 | 1233 | # qpush should be performed (pushes C), but a subsequent |
|
1234 | 1234 | # qpush without an argument is an error (nothing to |
|
1235 | 1235 | # apply). This allows a loop of "...while hg qpush..." to |
|
1236 | 1236 | # work as it detects an error when done |
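|
| | # Shell sketch of such a loop (illustrative): |
|
| | #   $ while hg qpush; do :; done |
|
| | # keeps pushing until the series is exhausted, then the nonzero |
|
| | # exit status reported below ends the loop. |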
|
1237 | 1237 | start = self.seriesend() |
|
1238 | 1238 | if start == len(self.series): |
|
1239 | 1239 | self.ui.warn(_('patch series already fully applied\n')) |
|
1240 | 1240 | return 1 |
|
1241 | 1241 | if not force and not check: |
|
1242 | 1242 | self.checklocalchanges(repo, refresh=self.applied) |
|
1243 | 1243 | |
|
1244 | 1244 | if exact: |
|
1245 | 1245 | if check: |
|
1246 | 1246 | raise util.Abort( |
|
1247 | 1247 | _("cannot use --exact and --check together")) |
|
1248 | 1248 | if move: |
|
1249 | 1249 | raise util.Abort(_('cannot use --exact and --move ' |
|
1250 | 1250 | 'together')) |
|
1251 | 1251 | if self.applied: |
|
1252 | 1252 | raise util.Abort(_('cannot push --exact with applied ' |
|
1253 | 1253 | 'patches')) |
|
1254 | 1254 | root = self.series[start] |
|
1255 | 1255 | target = patchheader(self.join(root), self.plainmode).parent |
|
1256 | 1256 | if not target: |
|
1257 | 1257 | raise util.Abort( |
|
1258 | 1258 | _("%s does not have a parent recorded") % root) |
|
1259 | 1259 | if not repo[target] == repo['.']: |
|
1260 | 1260 | hg.update(repo, target) |
|
1261 | 1261 | |
|
1262 | 1262 | if move: |
|
1263 | 1263 | if not patch: |
|
1264 | 1264 | raise util.Abort(_("please specify the patch to move")) |
|
1265 | 1265 | for fullstart, rpn in enumerate(self.fullseries): |
|
1266 | 1266 | # strip markers for patch guards |
|
1267 | 1267 | if self.guard_re.split(rpn, 1)[0] == self.series[start]: |
|
1268 | 1268 | break |
|
1269 | 1269 | for i, rpn in enumerate(self.fullseries[fullstart:]): |
|
1270 | 1270 | # strip markers for patch guards |
|
1271 | 1271 | if self.guard_re.split(rpn, 1)[0] == patch: |
|
1272 | 1272 | break |
|
1273 | 1273 | index = fullstart + i |
|
1274 | 1274 | assert index < len(self.fullseries) |
|
1275 | 1275 | fullpatch = self.fullseries[index] |
|
1276 | 1276 | del self.fullseries[index] |
|
1277 | 1277 | self.fullseries.insert(fullstart, fullpatch) |
|
1278 | 1278 | self.parseseries() |
|
1279 | 1279 | self.seriesdirty = True |
|
1280 | 1280 | |
|
1281 | 1281 | self.applieddirty = True |
|
1282 | 1282 | if start > 0: |
|
1283 | 1283 | self.checktoppatch(repo) |
|
1284 | 1284 | if not patch: |
|
1285 | 1285 | patch = self.series[start] |
|
1286 | 1286 | end = start + 1 |
|
1287 | 1287 | else: |
|
1288 | 1288 | end = self.series.index(patch, start) + 1 |
|
1289 | 1289 | |
|
1290 | 1290 | tobackup = set() |
|
1291 | 1291 | if (not nobackup and force) or check: |
|
1292 | 1292 | m, a, r, d = self.checklocalchanges(repo, force=True) |
|
1293 | 1293 | if check: |
|
1294 | 1294 | tobackup.update(m + a + r + d) |
|
1295 | 1295 | else: |
|
1296 | 1296 | tobackup.update(m + a) |
|
1297 | 1297 | |
|
1298 | 1298 | s = self.series[start:end] |
|
1299 | 1299 | all_files = set() |
|
1300 | 1300 | try: |
|
1301 | 1301 | if mergeq: |
|
1302 | 1302 | ret = self.mergepatch(repo, mergeq, s, diffopts) |
|
1303 | 1303 | else: |
|
1304 | 1304 | ret = self.apply(repo, s, list, all_files=all_files, |
|
1305 | 1305 | tobackup=tobackup, check=check) |
|
1306 | 1306 | except: |
|
1307 | 1307 | self.ui.warn(_('cleaning up working directory...')) |
|
1308 | 1308 | node = repo.dirstate.p1() |
|
1309 | 1309 | hg.revert(repo, node, None) |
|
1310 | 1310 | # only remove unknown files that we know we touched or |
|
1311 | 1311 | # created while patching |
|
1312 | 1312 | for f in all_files: |
|
1313 | 1313 | if f not in repo.dirstate: |
|
1314 | 1314 | try: |
|
1315 | 1315 | util.unlinkpath(repo.wjoin(f)) |
|
1316 | 1316 | except OSError, inst: |
|
1317 | 1317 | if inst.errno != errno.ENOENT: |
|
1318 | 1318 | raise |
|
1319 | 1319 | self.ui.warn(_('done\n')) |
|
1320 | 1320 | raise |
|
1321 | 1321 | |
|
1322 | 1322 | if not self.applied: |
|
1323 | 1323 | return ret[0] |
|
1324 | 1324 | top = self.applied[-1].name |
|
1325 | 1325 | if ret[0] and ret[0] > 1: |
|
1326 | 1326 | msg = _("errors during apply, please fix and refresh %s\n") |
|
1327 | 1327 | self.ui.write(msg % top) |
|
1328 | 1328 | else: |
|
1329 | 1329 | self.ui.write(_("now at: %s\n") % top) |
|
1330 | 1330 | return ret[0] |
|
1331 | 1331 | |
|
1332 | 1332 | finally: |
|
1333 | 1333 | wlock.release() |
|
1334 | 1334 | |
|
1335 | 1335 | def pop(self, repo, patch=None, force=False, update=True, all=False, |
|
1336 | 1336 | nobackup=False, check=False): |
|
1337 | 1337 | self.checkforcecheck(check, force) |
|
1338 | 1338 | wlock = repo.wlock() |
|
1339 | 1339 | try: |
|
1340 | 1340 | if patch: |
|
1341 | 1341 | # index, rev, patch |
|
1342 | 1342 | info = self.isapplied(patch) |
|
1343 | 1343 | if not info: |
|
1344 | 1344 | patch = self.lookup(patch) |
|
1345 | 1345 | info = self.isapplied(patch) |
|
1346 | 1346 | if not info: |
|
1347 | 1347 | raise util.Abort(_("patch %s is not applied") % patch) |
|
1348 | 1348 | |
|
1349 | 1349 | if not self.applied: |
|
1350 | 1350 | # Allow qpop -a to work repeatedly, |
|
1351 | 1351 | # but not qpop without an argument |
|
1352 | 1352 | self.ui.warn(_("no patches applied\n")) |
|
1353 | 1353 | return not all |
|
1354 | 1354 | |
|
1355 | 1355 | if all: |
|
1356 | 1356 | start = 0 |
|
1357 | 1357 | elif patch: |
|
1358 | 1358 | start = info[0] + 1 |
|
1359 | 1359 | else: |
|
1360 | 1360 | start = len(self.applied) - 1 |
|
1361 | 1361 | |
|
1362 | 1362 | if start >= len(self.applied): |
|
1363 | 1363 | self.ui.warn(_("qpop: %s is already at the top\n") % patch) |
|
1364 | 1364 | return |
|
1365 | 1365 | |
|
1366 | 1366 | if not update: |
|
1367 | 1367 | parents = repo.dirstate.parents() |
|
1368 | 1368 | rr = [x.node for x in self.applied] |
|
1369 | 1369 | for p in parents: |
|
1370 | 1370 | if p in rr: |
|
1371 | 1371 | self.ui.warn(_("qpop: forcing dirstate update\n")) |
|
1372 | 1372 | update = True |
|
1373 | 1373 | else: |
|
1374 | 1374 | parents = [p.node() for p in repo[None].parents()] |
|
1375 | 1375 | needupdate = False |
|
1376 | 1376 | for entry in self.applied[start:]: |
|
1377 | 1377 | if entry.node in parents: |
|
1378 | 1378 | needupdate = True |
|
1379 | 1379 | break |
|
1380 | 1380 | update = needupdate |
|
1381 | 1381 | |
|
1382 | 1382 | tobackup = set() |
|
1383 | 1383 | if update: |
|
1384 | 1384 | m, a, r, d = self.checklocalchanges(repo, force=force or check) |
|
1385 | 1385 | if force: |
|
1386 | 1386 | if not nobackup: |
|
1387 | 1387 | tobackup.update(m + a) |
|
1388 | 1388 | elif check: |
|
1389 | 1389 | tobackup.update(m + a + r + d) |
|
1390 | 1390 | |
|
1391 | 1391 | self.applieddirty = True |
|
1392 | 1392 | end = len(self.applied) |
|
1393 | 1393 | rev = self.applied[start].node |
|
1394 | 1394 | if update: |
|
1395 | 1395 | top = self.checktoppatch(repo)[0] |
|
1396 | 1396 | |
|
1397 | 1397 | try: |
|
1398 | 1398 | heads = repo.changelog.heads(rev) |
|
1399 | 1399 | except error.LookupError: |
|
1400 | 1400 | node = short(rev) |
|
1401 | 1401 | raise util.Abort(_('trying to pop unknown node %s') % node) |
|
1402 | 1402 | |
|
1403 | 1403 | if heads != [self.applied[-1].node]: |
|
1404 | 1404 | raise util.Abort(_("popping would remove a revision not " |
|
1405 | 1405 | "managed by this patch queue")) |
|
1406 | 1406 | if not repo[self.applied[-1].node].mutable(): |
|
1407 | 1407 | raise util.Abort( |
|
1408 | 1408 | _("popping would remove an immutable revision"), |
|
1409 | 1409 | hint=_('see "hg help phases" for details')) |
|
1410 | 1410 | |
|
1411 | 1411 | # we know there are no local changes, so we can make a simplified |
|
1412 | 1412 | # form of hg.update. |
|
1413 | 1413 | if update: |
|
1414 | 1414 | qp = self.qparents(repo, rev) |
|
1415 | 1415 | ctx = repo[qp] |
|
1416 | 1416 | m, a, r, d = repo.status(qp, top)[:4] |
|
1417 | 1417 | if d: |
|
1418 | 1418 | raise util.Abort(_("deletions found between repo revs")) |
|
1419 | 1419 | |
|
1420 | 1420 | tobackup = set(a + m + r) & tobackup |
|
1421 | 1421 | if check and tobackup: |
|
1422 | 1422 | self.localchangesfound() |
|
1423 | 1423 | self.backup(repo, tobackup) |
|
1424 | 1424 | |
|
1425 | 1425 | for f in a: |
|
1426 | 1426 | try: |
|
1427 | 1427 | util.unlinkpath(repo.wjoin(f)) |
|
1428 | 1428 | except OSError, e: |
|
1429 | 1429 | if e.errno != errno.ENOENT: |
|
1430 | 1430 | raise |
|
1431 | 1431 | repo.dirstate.drop(f) |
|
1432 | 1432 | for f in m + r: |
|
1433 | 1433 | fctx = ctx[f] |
|
1434 | 1434 | repo.wwrite(f, fctx.data(), fctx.flags()) |
|
1435 | 1435 | repo.dirstate.normal(f) |
|
1436 | 1436 | repo.setparents(qp, nullid) |
|
1437 | 1437 | for patch in reversed(self.applied[start:end]): |
|
1438 | 1438 | self.ui.status(_("popping %s\n") % patch.name) |
|
1439 | 1439 | del self.applied[start:end] |
|
1440 | 1440 | self.strip(repo, [rev], update=False, backup='strip') |
|
1441 | 1441 | if self.applied: |
|
1442 | 1442 | self.ui.write(_("now at: %s\n") % self.applied[-1].name) |
|
1443 | 1443 | else: |
|
1444 | 1444 | self.ui.write(_("patch queue now empty\n")) |
|
1445 | 1445 | finally: |
|
1446 | 1446 | wlock.release() |
|
1447 | 1447 | |
|
1448 | 1448 | def diff(self, repo, pats, opts): |
|
1449 | 1449 | top, patch = self.checktoppatch(repo) |
|
1450 | 1450 | if not top: |
|
1451 | 1451 | self.ui.write(_("no patches applied\n")) |
|
1452 | 1452 | return |
|
1453 | 1453 | qp = self.qparents(repo, top) |
|
1454 | 1454 | if opts.get('reverse'): |
|
1455 | 1455 | node1, node2 = None, qp |
|
1456 | 1456 | else: |
|
1457 | 1457 | node1, node2 = qp, None |
|
1458 | 1458 | diffopts = self.diffopts(opts, patch) |
|
1459 | 1459 | self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts) |
|
1460 | 1460 | |
|
1461 | 1461 | def refresh(self, repo, pats=None, **opts): |
|
1462 | 1462 | if not self.applied: |
|
1463 | 1463 | self.ui.write(_("no patches applied\n")) |
|
1464 | 1464 | return 1 |
|
1465 | 1465 | msg = opts.get('msg', '').rstrip() |
|
1466 | 1466 | newuser = opts.get('user') |
|
1467 | 1467 | newdate = opts.get('date') |
|
1468 | 1468 | if newdate: |
|
1469 | 1469 | newdate = '%d %d' % util.parsedate(newdate) |
|
1470 | 1470 | wlock = repo.wlock() |
|
1471 | 1471 | |
|
1472 | 1472 | try: |
|
1473 | 1473 | self.checktoppatch(repo) |
|
1474 | 1474 | (top, patchfn) = (self.applied[-1].node, self.applied[-1].name) |
|
1475 | 1475 | if repo.changelog.heads(top) != [top]: |
|
1476 | 1476 | raise util.Abort(_("cannot refresh a revision with children")) |
|
1477 | 1477 | if not repo[top].mutable(): |
|
1478 | 1478 | raise util.Abort(_("cannot refresh immutable revision"), |
|
1479 | 1479 | hint=_('see "hg help phases" for details')) |
|
1480 | 1480 | |
|
1481 | 1481 | inclsubs = self.checksubstate(repo) |
|
1482 | 1482 | |
|
1483 | 1483 | cparents = repo.changelog.parents(top) |
|
1484 | 1484 | patchparent = self.qparents(repo, top) |
|
1485 | 1485 | ph = patchheader(self.join(patchfn), self.plainmode) |
|
1486 | 1486 | diffopts = self.diffopts({'git': opts.get('git')}, patchfn) |
|
1487 | 1487 | if msg: |
|
1488 | 1488 | ph.setmessage(msg) |
|
1489 | 1489 | if newuser: |
|
1490 | 1490 | ph.setuser(newuser) |
|
1491 | 1491 | if newdate: |
|
1492 | 1492 | ph.setdate(newdate) |
|
1493 | 1493 | ph.setparent(hex(patchparent)) |
|
1494 | 1494 | |
|
1495 | 1495 | # only commit new patch when write is complete |
|
1496 | 1496 | patchf = self.opener(patchfn, 'w', atomictemp=True) |
|
1497 | 1497 | |
|
1498 | 1498 | comments = str(ph) |
|
1499 | 1499 | if comments: |
|
1500 | 1500 | patchf.write(comments) |
|
1501 | 1501 | |
|
1502 | 1502 | # update the dirstate in place, strip off the qtip commit |
|
1503 | 1503 | # and then commit. |
|
1504 | 1504 | # |
|
1505 | 1505 | # this should really read: |
|
1506 | 1506 | # mm, dd, aa = repo.status(top, patchparent)[:3] |
|
1507 | 1507 | # but we do it backwards to take advantage of manifest/chlog |
|
1508 | 1508 | # caching against the next repo.status call |
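|
| | # (The two forms are equivalent because reversing the comparison |
|
| | # swaps added and removed: a file the patch adds shows up as |
|
| | # removed when diffing from top back to patchparent.) |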
|
1509 | 1509 | mm, aa, dd = repo.status(patchparent, top)[:3] |
|
1510 | 1510 | changes = repo.changelog.read(top) |
|
1511 | 1511 | man = repo.manifest.read(changes[0]) |
|
1512 | 1512 | aaa = aa[:] |
|
1513 | 1513 | matchfn = scmutil.match(repo[None], pats, opts) |
|
1514 | 1514 | # in short mode, we only diff the files already included in |
|
1515 | 1515 | # the patch plus any specified files |
|
1516 | 1516 | if opts.get('short'): |
|
1517 | 1517 | # if amending a patch, we start with existing |
|
1518 | 1518 | # files plus specified files - unfiltered |
|
1519 | 1519 | match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files()) |
|
1520 | 1520 | # filter with include/exclude options |
|
1521 | 1521 | matchfn = scmutil.match(repo[None], opts=opts) |
|
1522 | 1522 | else: |
|
1523 | 1523 | match = scmutil.matchall(repo) |
|
1524 | 1524 | m, a, r, d = repo.status(match=match)[:4] |
|
1525 | 1525 | mm = set(mm) |
|
1526 | 1526 | aa = set(aa) |
|
1527 | 1527 | dd = set(dd) |
|
1528 | 1528 | |
|
1529 | 1529 | # we might end up with files that were added between |
|
1530 | 1530 | # qtip and the dirstate parent, but then changed in the |
|
1531 | 1531 | # local dirstate. In this case, we want them to only |
|
1532 | 1532 | # show up in the added section. |
|
1533 | 1533 | for x in m: |
|
1534 | 1534 | if x not in aa: |
|
1535 | 1535 | mm.add(x) |
|
1536 | 1536 | # we might end up with files added by the local dirstate that |
|
1537 | 1537 | # were deleted by the patch. In this case, they should only |
|
1538 | 1538 | # show up in the changed section. |
|
1539 | 1539 | for x in a: |
|
1540 | 1540 | if x in dd: |
|
1541 | 1541 | dd.remove(x) |
|
1542 | 1542 | mm.add(x) |
|
1543 | 1543 | else: |
|
1544 | 1544 | aa.add(x) |
|
1545 | 1545 | # make sure any files deleted in the local dirstate |
|
1546 | 1546 | # are not in the add or change column of the patch |
|
1547 | 1547 | forget = [] |
|
1548 | 1548 | for x in d + r: |
|
1549 | 1549 | if x in aa: |
|
1550 | 1550 | aa.remove(x) |
|
1551 | 1551 | forget.append(x) |
|
1552 | 1552 | continue |
|
1553 | 1553 | else: |
|
1554 | 1554 | mm.discard(x) |
|
1555 | 1555 | dd.add(x) |
|
1556 | 1556 | |
|
1557 | 1557 | m = list(mm) |
|
1558 | 1558 | r = list(dd) |
|
1559 | 1559 | a = list(aa) |
|
1560 | 1560 | c = [filter(matchfn, l) for l in (m, a, r)] |
|
1561 | 1561 | match = scmutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs)) |
|
1562 | 1562 | chunks = patchmod.diff(repo, patchparent, match=match, |
|
1563 | 1563 | changes=c, opts=diffopts) |
|
1564 | 1564 | for chunk in chunks: |
|
1565 | 1565 | patchf.write(chunk) |
|
1566 | 1566 | |
|
1567 | 1567 | try: |
|
1568 | 1568 | if diffopts.git or diffopts.upgrade: |
|
1569 | 1569 | copies = {} |
|
1570 | 1570 | for dst in a: |
|
1571 | 1571 | src = repo.dirstate.copied(dst) |
|
1572 | 1572 | # during qfold, the source file for copies may |
|
1573 | 1573 | # be removed. Treat this as a simple add. |
|
1574 | 1574 | if src is not None and src in repo.dirstate: |
|
1575 | 1575 | copies.setdefault(src, []).append(dst) |
|
1576 | 1576 | repo.dirstate.add(dst) |
|
1577 | 1577 | # remember the copies between patchparent and qtip |
|
1578 | 1578 | for dst in aaa: |
|
1579 | 1579 | f = repo.file(dst) |
|
1580 | 1580 | src = f.renamed(man[dst]) |
|
1581 | 1581 | if src: |
|
1582 | 1582 | copies.setdefault(src[0], []).extend( |
|
1583 | 1583 | copies.get(dst, [])) |
|
1584 | 1584 | if dst in a: |
|
1585 | 1585 | copies[src[0]].append(dst) |
|
1586 | 1586 | # we can't copy a file created by the patch itself |
|
1587 | 1587 | if dst in copies: |
|
1588 | 1588 | del copies[dst] |
|
1589 | 1589 | for src, dsts in copies.iteritems(): |
|
1590 | 1590 | for dst in dsts: |
|
1591 | 1591 | repo.dirstate.copy(src, dst) |
|
1592 | 1592 | else: |
|
1593 | 1593 | for dst in a: |
|
1594 | 1594 | repo.dirstate.add(dst) |
|
1595 | 1595 | # Drop useless copy information |
|
1596 | 1596 | for f in list(repo.dirstate.copies()): |
|
1597 | 1597 | repo.dirstate.copy(None, f) |
|
1598 | 1598 | for f in r: |
|
1599 | 1599 | repo.dirstate.remove(f) |
|
1600 | 1600 | # if the patch excludes a modified file, mark that |
|
1601 | 1601 | # file with mtime=0 so status can see it. |
|
1602 | 1602 | mm = [] |
|
1603 | 1603 | for i in xrange(len(m)-1, -1, -1): |
|
1604 | 1604 | if not matchfn(m[i]): |
|
1605 | 1605 | mm.append(m[i]) |
|
1606 | 1606 | del m[i] |
|
1607 | 1607 | for f in m: |
|
1608 | 1608 | repo.dirstate.normal(f) |
|
1609 | 1609 | for f in mm: |
|
1610 | 1610 | repo.dirstate.normallookup(f) |
|
1611 | 1611 | for f in forget: |
|
1612 | 1612 | repo.dirstate.drop(f) |
|
1613 | 1613 | |
|
1614 | 1614 | if not msg: |
|
1615 | 1615 | if not ph.message: |
|
1616 | 1616 | message = "[mq]: %s\n" % patchfn |
|
1617 | 1617 | else: |
|
1618 | 1618 | message = "\n".join(ph.message) |
|
1619 | 1619 | else: |
|
1620 | 1620 | message = msg |
|
1621 | 1621 | |
|
1622 | 1622 | user = ph.user or changes[1] |
|
1623 | 1623 | |
|
1624 | 1624 | oldphase = repo[top].phase() |
|
1625 | 1625 | |
|
1626 | 1626 | # assumes strip can roll itself back if interrupted |
|
1627 | 1627 | repo.setparents(*cparents) |
|
1628 | 1628 | self.applied.pop() |
|
1629 | 1629 | self.applieddirty = True |
|
1630 | 1630 | self.strip(repo, [top], update=False, |
|
1631 | 1631 | backup='strip') |
|
1632 | 1632 | except: |
|
1633 | 1633 | repo.dirstate.invalidate() |
|
1634 | 1634 | raise |
|
1635 | 1635 | |
|
1636 | 1636 | try: |
|
1637 | 1637 | # might be nice to attempt to roll back strip after this |
|
1638 | 1638 | |
|
1639 | 1639 | # Ensure we create a new changeset in the same phase as |
|
1640 | 1640 | # the old one. |
|
1641 | 1641 | n = newcommit(repo, oldphase, message, user, ph.date, |
|
1642 | 1642 | match=match, force=True) |
|
1643 | 1643 | # only write patch after a successful commit |
|
1644 | 1644 | patchf.close() |
|
1645 | 1645 | self.applied.append(statusentry(n, patchfn)) |
|
1646 | 1646 | except: |
|
1647 | 1647 | ctx = repo[cparents[0]] |
|
1648 | 1648 | repo.dirstate.rebuild(ctx.node(), ctx.manifest()) |
|
1649 | 1649 | self.savedirty() |
|
1650 | 1650 | self.ui.warn(_('refresh interrupted while patch was popped! ' |
|
1651 | 1651 | '(revert --all, qpush to recover)\n')) |
|
1652 | 1652 | raise |
|
1653 | 1653 | finally: |
|
1654 | 1654 | wlock.release() |
|
1655 | 1655 | self.removeundo(repo) |
|
1656 | 1656 | |
|
1657 | 1657 | def init(self, repo, create=False): |
|
1658 | 1658 | if not create and os.path.isdir(self.path): |
|
1659 | 1659 | raise util.Abort(_("patch queue directory already exists")) |
|
1660 | 1660 | try: |
|
1661 | 1661 | os.mkdir(self.path) |
|
1662 | 1662 | except OSError, inst: |
|
1663 | 1663 | if inst.errno != errno.EEXIST or not create: |
|
1664 | 1664 | raise |
|
1665 | 1665 | if create: |
|
1666 | 1666 | return self.qrepo(create=True) |
|
1667 | 1667 | |
|
1668 | 1668 | def unapplied(self, repo, patch=None): |
|
1669 | 1669 | if patch and patch not in self.series: |
|
1670 | 1670 | raise util.Abort(_("patch %s is not in series file") % patch) |
|
1671 | 1671 | if not patch: |
|
1672 | 1672 | start = self.seriesend() |
|
1673 | 1673 | else: |
|
1674 | 1674 | start = self.series.index(patch) + 1 |
|
1675 | 1675 | unapplied = [] |
|
1676 | 1676 | for i in xrange(start, len(self.series)): |
|
1677 | 1677 | pushable, reason = self.pushable(i) |
|
1678 | 1678 | if pushable: |
|
1679 | 1679 | unapplied.append((i, self.series[i])) |
|
1680 | 1680 | self.explainpushable(i) |
|
1681 | 1681 | return unapplied |
|
1682 | 1682 | |
|
1683 | 1683 | def qseries(self, repo, missing=None, start=0, length=None, status=None, |
|
1684 | 1684 | summary=False): |
|
1685 | 1685 | def displayname(pfx, patchname, state): |
|
1686 | 1686 | if pfx: |
|
1687 | 1687 | self.ui.write(pfx) |
|
1688 | 1688 | if summary: |
|
1689 | 1689 | ph = patchheader(self.join(patchname), self.plainmode) |
|
1690 | 1690 | msg = ph.message and ph.message[0] or '' |
|
1691 | 1691 | if self.ui.formatted(): |
|
1692 | 1692 | width = self.ui.termwidth() - len(pfx) - len(patchname) - 2 |
|
1693 | 1693 | if width > 0: |
|
1694 | 1694 | msg = util.ellipsis(msg, width) |
|
1695 | 1695 | else: |
|
1696 | 1696 | msg = '' |
|
1697 | 1697 | self.ui.write(patchname, label='qseries.' + state) |
|
1698 | 1698 | self.ui.write(': ') |
|
1699 | 1699 | self.ui.write(msg, label='qseries.message.' + state) |
|
1700 | 1700 | else: |
|
1701 | 1701 | self.ui.write(patchname, label='qseries.' + state) |
|
1702 | 1702 | self.ui.write('\n') |
|
1703 | 1703 | |
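|
| | # Sample verbose, summarized listing (hypothetical patch names): |
|
| | #   0 A first.patch: tidy whitespace |
|
| | #   1 U second.patch |
|
| | #   2 G third.patch |
|
| | # where A, U and G mark applied, unapplied and guarded entries, |
|
| | # and D (for missing) marks files absent from the series. |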
|
1704 | 1704 | applied = set([p.name for p in self.applied]) |
|
1705 | 1705 | if length is None: |
|
1706 | 1706 | length = len(self.series) - start |
|
1707 | 1707 | if not missing: |
|
1708 | 1708 | if self.ui.verbose: |
|
1709 | 1709 | idxwidth = len(str(start + length - 1)) |
|
1710 | 1710 | for i in xrange(start, start + length): |
|
1711 | 1711 | patch = self.series[i] |
|
1712 | 1712 | if patch in applied: |
|
1713 | 1713 | char, state = 'A', 'applied' |
|
1714 | 1714 | elif self.pushable(i)[0]: |
|
1715 | 1715 | char, state = 'U', 'unapplied' |
|
1716 | 1716 | else: |
|
1717 | 1717 | char, state = 'G', 'guarded' |
|
1718 | 1718 | pfx = '' |
|
1719 | 1719 | if self.ui.verbose: |
|
1720 | 1720 | pfx = '%*d %s ' % (idxwidth, i, char) |
|
1721 | 1721 | elif status and status != char: |
|
1722 | 1722 | continue |
|
1723 | 1723 | displayname(pfx, patch, state) |
|
1724 | 1724 | else: |
|
1725 | 1725 | msng_list = [] |
|
1726 | 1726 | for root, dirs, files in os.walk(self.path): |
|
1727 | 1727 | d = root[len(self.path) + 1:] |
|
1728 | 1728 | for f in files: |
|
1729 | 1729 | fl = os.path.join(d, f) |
|
1730 | 1730 | if (fl not in self.series and |
|
1731 | 1731 | fl not in (self.statuspath, self.seriespath, |
|
1732 | 1732 | self.guardspath) |
|
1733 | 1733 | and not fl.startswith('.')): |
|
1734 | 1734 | msng_list.append(fl) |
|
1735 | 1735 | for x in sorted(msng_list): |
|
1736 | 1736 | pfx = self.ui.verbose and ('D ') or '' |
|
1737 | 1737 | displayname(pfx, x, 'missing') |
|
1738 | 1738 | |
|
1739 | 1739 | def issaveline(self, l): |
|
1740 | 1740 | if l.name == '.hg.patches.save.line': |
|
1741 | 1741 | return True |
|
1742 | 1742 | |
|
1743 | 1743 | def qrepo(self, create=False): |
|
1744 | 1744 | ui = self.ui.copy() |
|
1745 | 1745 | ui.setconfig('paths', 'default', '', overlay=False) |
|
1746 | 1746 | ui.setconfig('paths', 'default-push', '', overlay=False) |
|
1747 | 1747 | if create or os.path.isdir(self.join(".hg")): |
|
1748 | 1748 | return hg.repository(ui, path=self.path, create=create) |
|
1749 | 1749 | |
|
1750 | 1750 | def restore(self, repo, rev, delete=None, qupdate=None): |
|
1751 | 1751 | desc = repo[rev].description().strip() |
|
1752 | 1752 | lines = desc.splitlines() |
|
1753 | 1753 | i = 0 |
|
1754 | 1754 | datastart = None |
|
1755 | 1755 | series = [] |
|
1756 | 1756 | applied = [] |
|
1757 | 1757 | qpp = None |
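|
| | # The description written by save() further down looks roughly |
|
| | # like this (hypothetical values): |
|
| | #   hg patches saved state |
|
| | #   Dirstate: <p1 hex> <p2 hex> |
|
| | #   Patch Data: |
|
| | #   <node hex>:applied-patch-name |
|
| | #   :unapplied-series-entry |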
|
1758 | 1758 | for i, line in enumerate(lines): |
|
1759 | 1759 | if line == 'Patch Data:': |
|
1760 | 1760 | datastart = i + 1 |
|
1761 | 1761 | elif line.startswith('Dirstate:'): |
|
1762 | 1762 | l = line.rstrip() |
|
1763 | 1763 | l = l[10:].split(' ') |
|
1764 | 1764 | qpp = [bin(x) for x in l] |
|
1765 | 1765 | elif datastart is not None: |
|
1766 | 1766 | l = line.rstrip() |
|
1767 | 1767 | n, name = l.split(':', 1) |
|
1768 | 1768 | if n: |
|
1769 | 1769 | applied.append(statusentry(bin(n), name)) |
|
1770 | 1770 | else: |
|
1771 | 1771 | series.append(l) |
|
1772 | 1772 | if datastart is None: |
|
1773 | 1773 | self.ui.warn(_("No saved patch data found\n")) |
|
1774 | 1774 | return 1 |
|
1775 | 1775 | self.ui.warn(_("restoring status: %s\n") % lines[0]) |
|
1776 | 1776 | self.fullseries = series |
|
1777 | 1777 | self.applied = applied |
|
1778 | 1778 | self.parseseries() |
|
1779 | 1779 | self.seriesdirty = True |
|
1780 | 1780 | self.applieddirty = True |
|
1781 | 1781 | heads = repo.changelog.heads() |
|
1782 | 1782 | if delete: |
|
1783 | 1783 | if rev not in heads: |
|
1784 | 1784 | self.ui.warn(_("save entry has children, leaving it alone\n")) |
|
1785 | 1785 | else: |
|
1786 | 1786 | self.ui.warn(_("removing save entry %s\n") % short(rev)) |
|
1787 | 1787 | pp = repo.dirstate.parents() |
|
1788 | 1788 | if rev in pp: |
|
1789 | 1789 | update = True |
|
1790 | 1790 | else: |
|
1791 | 1791 | update = False |
|
1792 | 1792 | self.strip(repo, [rev], update=update, backup='strip') |
|
1793 | 1793 | if qpp: |
|
1794 | 1794 | self.ui.warn(_("saved queue repository parents: %s %s\n") % |
|
1795 | 1795 | (short(qpp[0]), short(qpp[1]))) |
|
1796 | 1796 | if qupdate: |
|
1797 | 1797 | self.ui.status(_("updating queue directory\n")) |
|
1798 | 1798 | r = self.qrepo() |
|
1799 | 1799 | if not r: |
|
1800 | 1800 | self.ui.warn(_("Unable to load queue repository\n")) |
|
1801 | 1801 | return 1 |
|
1802 | 1802 | hg.clean(r, qpp[0]) |
|
1803 | 1803 | |
|
1804 | 1804 | def save(self, repo, msg=None): |
|
1805 | 1805 | if not self.applied: |
|
1806 | 1806 | self.ui.warn(_("save: no patches applied, exiting\n")) |
|
1807 | 1807 | return 1 |
|
1808 | 1808 | if self.issaveline(self.applied[-1]): |
|
1809 | 1809 | self.ui.warn(_("status is already saved\n")) |
|
1810 | 1810 | return 1 |
|
1811 | 1811 | |
|
1812 | 1812 | if not msg: |
|
1813 | 1813 | msg = _("hg patches saved state") |
|
1814 | 1814 | else: |
|
1815 | 1815 | msg = "hg patches: " + msg.rstrip('\r\n') |
|
1816 | 1816 | r = self.qrepo() |
|
1817 | 1817 | if r: |
|
1818 | 1818 | pp = r.dirstate.parents() |
|
1819 | 1819 | msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1])) |
|
1820 | 1820 | msg += "\n\nPatch Data:\n" |
|
1821 | 1821 | msg += ''.join('%s\n' % x for x in self.applied) |
|
1822 | 1822 | msg += ''.join(':%s\n' % x for x in self.fullseries) |
|
1823 | 1823 | n = repo.commit(msg, force=True) |
|
1824 | 1824 | if not n: |
|
1825 | 1825 | self.ui.warn(_("repo commit failed\n")) |
|
1826 | 1826 | return 1 |
|
1827 | 1827 | self.applied.append(statusentry(n, '.hg.patches.save.line')) |
|
1828 | 1828 | self.applieddirty = True |
|
1829 | 1829 | self.removeundo(repo) |
|
1830 | 1830 | |
|
1831 | 1831 | def fullseriesend(self): |
|
1832 | 1832 | if self.applied: |
|
1833 | 1833 | p = self.applied[-1].name |
|
1834 | 1834 | end = self.findseries(p) |
|
1835 | 1835 | if end is None: |
|
1836 | 1836 | return len(self.fullseries) |
|
1837 | 1837 | return end + 1 |
|
1838 | 1838 | return 0 |
|
1839 | 1839 | |
|
1840 | 1840 | def seriesend(self, all_patches=False): |
|
1841 | 1841 | """If all_patches is False, return the index of the next pushable patch |
|
1842 | 1842 | in the series, or the series length. If all_patches is True, return the |
|
1843 | 1843 | index of the first patch past the last applied one. |
|
1844 | 1844 | """ |
|
1845 | 1845 | end = 0 |
|
1846 | 1846 | def next(start): |
|
1847 | 1847 | if all_patches or start >= len(self.series): |
|
1848 | 1848 | return start |
|
1849 | 1849 | for i in xrange(start, len(self.series)): |
|
1850 | 1850 | p, reason = self.pushable(i) |
|
1851 | 1851 | if p: |
|
1852 | 1852 | return i |
|
1853 | 1853 | self.explainpushable(i) |
|
1854 | 1854 | return len(self.series) |
|
1855 | 1855 | if self.applied: |
|
1856 | 1856 | p = self.applied[-1].name |
|
1857 | 1857 | try: |
|
1858 | 1858 | end = self.series.index(p) |
|
1859 | 1859 | except ValueError: |
|
1860 | 1860 | return 0 |
|
1861 | 1861 | return next(end + 1) |
|
1862 | 1862 | return next(end) |
|
1863 | 1863 | |
|
1864 | 1864 | def appliedname(self, index): |
|
1865 | 1865 | pname = self.applied[index].name |
|
1866 | 1866 | if not self.ui.verbose: |
|
1867 | 1867 | p = pname |
|
1868 | 1868 | else: |
|
1869 | 1869 | p = str(self.series.index(pname)) + " " + pname |
|
1870 | 1870 | return p |
|
1871 | 1871 | |
|
1872 | 1872 | def qimport(self, repo, files, patchname=None, rev=None, existing=None, |
|
1873 | 1873 | force=None, git=False): |
|
1874 | 1874 | def checkseries(patchname): |
|
1875 | 1875 | if patchname in self.series: |
|
1876 | 1876 | raise util.Abort(_('patch %s is already in the series file') |
|
1877 | 1877 | % patchname) |
|
1878 | 1878 | |
|
1879 | 1879 | if rev: |
|
1880 | 1880 | if files: |
|
1881 | 1881 | raise util.Abort(_('option "-r" not valid when importing ' |
|
1882 | 1882 | 'files')) |
|
1883 | 1883 | rev = scmutil.revrange(repo, rev) |
|
1884 | 1884 | rev.sort(reverse=True) |
|
1885 | 1885 | if (len(files) > 1 or len(rev) > 1) and patchname: |
|
1886 | 1886 | raise util.Abort(_('option "-n" not valid when importing multiple ' |
|
1887 | 1887 | 'patches')) |
|
1888 | 1888 | imported = [] |
|
1889 | 1889 | if rev: |
|
1890 | 1890 | # If mq patches are applied, we can only import revisions |
|
1891 | 1891 | # that form a linear path to qbase. |
|
1892 | 1892 | # Otherwise, they should form a linear path to a head. |
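|
| | # e.g. (hypothetical revisions): with linear history r1-r2-r3 and |
|
| | # no patches applied, importing r2 and r3 together is fine, but |
|
| | # importing r2 alone leaves r3 as an unmanaged child and aborts. |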
|
1893 | 1893 | heads = repo.changelog.heads(repo.changelog.node(rev[-1])) |
|
1894 | 1894 | if len(heads) > 1: |
|
1895 | 1895 | raise util.Abort(_('revision %d is the root of more than one ' |
|
1896 | 1896 | 'branch') % rev[-1]) |
|
1897 | 1897 | if self.applied: |
|
1898 | 1898 | base = repo.changelog.node(rev[0]) |
|
1899 | 1899 | if base in [n.node for n in self.applied]: |
|
1900 | 1900 | raise util.Abort(_('revision %d is already managed') |
|
1901 | 1901 | % rev[0]) |
|
1902 | 1902 | if heads != [self.applied[-1].node]: |
|
1903 | 1903 | raise util.Abort(_('revision %d is not the parent of ' |
|
1904 | 1904 | 'the queue') % rev[0]) |
|
1905 | 1905 | base = repo.changelog.rev(self.applied[0].node) |
|
1906 | 1906 | lastparent = repo.changelog.parentrevs(base)[0] |
|
1907 | 1907 | else: |
|
1908 | 1908 | if heads != [repo.changelog.node(rev[0])]: |
|
1909 | 1909 | raise util.Abort(_('revision %d has unmanaged children') |
|
1910 | 1910 | % rev[0]) |
|
1911 | 1911 | lastparent = None |
|
1912 | 1912 | |
|
1913 | 1913 | diffopts = self.diffopts({'git': git}) |
|
1914 | 1914 | for r in rev: |
|
1915 | 1915 | if not repo[r].mutable(): |
|
1916 | 1916 | raise util.Abort(_('revision %d is not mutable') % r, |
|
1917 | 1917 | hint=_('see "hg help phases" for details')) |
|
1918 | 1918 | p1, p2 = repo.changelog.parentrevs(r) |
|
1919 | 1919 | n = repo.changelog.node(r) |
|
1920 | 1920 | if p2 != nullrev: |
|
1921 | 1921 | raise util.Abort(_('cannot import merge revision %d') % r) |
|
1922 | 1922 | if lastparent and lastparent != r: |
|
1923 | 1923 | raise util.Abort(_('revision %d is not the parent of %d') |
|
1924 | 1924 | % (r, lastparent)) |
|
1925 | 1925 | lastparent = p1 |
|
1926 | 1926 | |
|
1927 | 1927 | if not patchname: |
|
1928 | 1928 | patchname = normname('%d.diff' % r) |
|
1929 | 1929 | checkseries(patchname) |
|
1930 | 1930 | self.checkpatchname(patchname, force) |
|
1931 | 1931 | self.fullseries.insert(0, patchname) |
|
1932 | 1932 | |
|
1933 | 1933 | patchf = self.opener(patchname, "w") |
|
1934 | 1934 | cmdutil.export(repo, [n], fp=patchf, opts=diffopts) |
|
1935 | 1935 | patchf.close() |
|
1936 | 1936 | |
|
1937 | 1937 | se = statusentry(n, patchname) |
|
1938 | 1938 | self.applied.insert(0, se) |
|
1939 | 1939 | |
|
1940 | 1940 | self.added.append(patchname) |
|
1941 | 1941 | imported.append(patchname) |
|
1942 | 1942 | patchname = None |
|
1943 | 1943 | if rev and repo.ui.configbool('mq', 'secret', False): |
|
1944 | 1944 | # if we added anything with --rev, we must move the secret root |
|
1945 | 1945 | phases.retractboundary(repo, phases.secret, [n]) |
|
1946 | 1946 | self.parseseries() |
|
1947 | 1947 | self.applieddirty = True |
|
1948 | 1948 | self.seriesdirty = True |
|
1949 | 1949 | |
|
1950 | 1950 | for i, filename in enumerate(files): |
|
1951 | 1951 | if existing: |
|
1952 | 1952 | if filename == '-': |
|
1953 | 1953 | raise util.Abort(_('-e is incompatible with import from -')) |
|
1954 | 1954 | filename = normname(filename) |
|
1955 | 1955 | self.checkreservedname(filename) |
|
1956 | 1956 | originpath = self.join(filename) |
|
1957 | 1957 | if not os.path.isfile(originpath): |
|
1958 | 1958 | raise util.Abort(_("patch %s does not exist") % filename) |
|
1959 | 1959 | |
|
1960 | 1960 | if patchname: |
|
1961 | 1961 | self.checkpatchname(patchname, force) |
|
1962 | 1962 | |
|
1963 | 1963 | self.ui.write(_('renaming %s to %s\n') |
|
1964 | 1964 | % (filename, patchname)) |
|
1965 | 1965 | util.rename(originpath, self.join(patchname)) |
|
1966 | 1966 | else: |
|
1967 | 1967 | patchname = filename |
|
1968 | 1968 | |
|
1969 | 1969 | else: |
|
1970 | 1970 | if filename == '-' and not patchname: |
|
1971 | 1971 | raise util.Abort(_('need --name to import a patch from -')) |
|
1972 | 1972 | elif not patchname: |
|
1973 | 1973 | patchname = normname(os.path.basename(filename.rstrip('/'))) |
|
1974 | 1974 | self.checkpatchname(patchname, force) |
|
1975 | 1975 | try: |
|
1976 | 1976 | if filename == '-': |
|
1977 | 1977 | text = self.ui.fin.read() |
|
1978 | 1978 | else: |
|
1979 | 1979 | fp = url.open(self.ui, filename) |
|
1980 | 1980 | text = fp.read() |
|
1981 | 1981 | fp.close() |
|
1982 | 1982 | except (OSError, IOError): |
|
1983 | 1983 | raise util.Abort(_("unable to read file %s") % filename) |
|
1984 | 1984 | patchf = self.opener(patchname, "w") |
|
1985 | 1985 | patchf.write(text) |
|
1986 | 1986 | patchf.close() |
|
1987 | 1987 | if not force: |
|
1988 | 1988 | checkseries(patchname) |
|
1989 | 1989 | if patchname not in self.series: |
|
1990 | 1990 | index = self.fullseriesend() + i |
|
1991 | 1991 | self.fullseries[index:index] = [patchname] |
|
1992 | 1992 | self.parseseries() |
|
1993 | 1993 | self.seriesdirty = True |
|
1994 | 1994 | self.ui.warn(_("adding %s to series file\n") % patchname) |
|
1995 | 1995 | self.added.append(patchname) |
|
1996 | 1996 | imported.append(patchname) |
|
1997 | 1997 | patchname = None |
|
1998 | 1998 | |
|
1999 | 1999 | self.removeundo(repo) |
|
2000 | 2000 | return imported |
|
2001 | 2001 | |
|
2002 | 2002 | def fixcheckopts(ui, opts): |
|
2003 | 2003 | if (not ui.configbool('mq', 'check') or opts.get('force') |
|
2004 | 2004 | or opts.get('exact')): |
|
2005 | 2005 | return opts |
|
2006 | 2006 | opts = dict(opts) |
|
2007 | 2007 | opts['check'] = True |
|
2008 | 2008 | return opts |
|
2009 | 2009 | |
|
2010 | 2010 | @command("qdelete|qremove|qrm", |
|
2011 | 2011 | [('k', 'keep', None, _('keep patch file')), |
|
2012 | 2012 | ('r', 'rev', [], |
|
2013 | 2013 | _('stop managing a revision (DEPRECATED)'), _('REV'))], |
|
2014 | 2014 | _('hg qdelete [-k] [PATCH]...')) |
|
2015 | 2015 | def delete(ui, repo, *patches, **opts): |
|
2016 | 2016 | """remove patches from queue |
|
2017 | 2017 | |
|
2018 | 2018 | The patches must not be applied, and at least one patch is required. Exact |
|
2019 | 2019 | patch identifiers must be given. With -k/--keep, the patch files are |
|
2020 | 2020 | preserved in the patch directory. |
|
2021 | 2021 | |
|
2022 | 2022 | To stop managing a patch and move it into permanent history, |
|
2023 | 2023 | use the :hg:`qfinish` command.""" |
|
2024 | 2024 | q = repo.mq |
|
2025 | 2025 | q.delete(repo, patches, opts) |
|
2026 | 2026 | q.savedirty() |
|
2027 | 2027 | return 0 |
|
2028 | 2028 | |
|
2029 | 2029 | @command("qapplied", |
|
2030 | 2030 | [('1', 'last', None, _('show only the preceding applied patch')) |
|
2031 | 2031 | ] + seriesopts, |
|
2032 | 2032 | _('hg qapplied [-1] [-s] [PATCH]')) |
|
2033 | 2033 | def applied(ui, repo, patch=None, **opts): |
|
2034 | 2034 | """print the patches already applied |
|
2035 | 2035 | |
|
2036 | 2036 | Returns 0 on success.""" |
|
2037 | 2037 | |
|
2038 | 2038 | q = repo.mq |
|
2039 | 2039 | |
|
2040 | 2040 | if patch: |
|
2041 | 2041 | if patch not in q.series: |
|
2042 | 2042 | raise util.Abort(_("patch %s is not in series file") % patch) |
|
2043 | 2043 | end = q.series.index(patch) + 1 |
|
2044 | 2044 | else: |
|
2045 | 2045 | end = q.seriesend(True) |
|
2046 | 2046 | |
|
2047 | 2047 | if opts.get('last') and not end: |
|
2048 | 2048 | ui.write(_("no patches applied\n")) |
|
2049 | 2049 | return 1 |
|
2050 | 2050 | elif opts.get('last') and end == 1: |
|
2051 | 2051 | ui.write(_("only one patch applied\n")) |
|
2052 | 2052 | return 1 |
|
2053 | 2053 | elif opts.get('last'): |
|
2054 | 2054 | start = end - 2 |
|
2055 | 2055 | end = 1 |
|
2056 | 2056 | else: |
|
2057 | 2057 | start = 0 |
|
2058 | 2058 | |
|
2059 | 2059 | q.qseries(repo, length=end, start=start, status='A', |
|
2060 | 2060 | summary=opts.get('summary')) |
|
2061 | 2061 | |
|
2062 | 2062 | |
|
2063 | 2063 | @command("qunapplied", |
|
2064 | 2064 | [('1', 'first', None, _('show only the first patch'))] + seriesopts, |
|
2065 | 2065 | _('hg qunapplied [-1] [-s] [PATCH]')) |
|
2066 | 2066 | def unapplied(ui, repo, patch=None, **opts): |
|
2067 | 2067 | """print the patches not yet applied |
|
2068 | 2068 | |
|
2069 | 2069 | Returns 0 on success.""" |
|
2070 | 2070 | |
|
2071 | 2071 | q = repo.mq |
|
2072 | 2072 | if patch: |
|
2073 | 2073 | if patch not in q.series: |
|
2074 | 2074 | raise util.Abort(_("patch %s is not in series file") % patch) |
|
2075 | 2075 | start = q.series.index(patch) + 1 |
|
2076 | 2076 | else: |
|
2077 | 2077 | start = q.seriesend(True) |
|
2078 | 2078 | |
|
2079 | 2079 | if start == len(q.series) and opts.get('first'): |
|
2080 | 2080 | ui.write(_("all patches applied\n")) |
|
2081 | 2081 | return 1 |
|
2082 | 2082 | |
|
2083 | 2083 | length = opts.get('first') and 1 or None |
|
2084 | 2084 | q.qseries(repo, start=start, length=length, status='U', |
|
2085 | 2085 | summary=opts.get('summary')) |
|
2086 | 2086 | |
|
2087 | 2087 | @command("qimport", |
|
2088 | 2088 | [('e', 'existing', None, _('import file in patch directory')), |
|
2089 | 2089 | ('n', 'name', '', |
|
2090 | 2090 | _('name of patch file'), _('NAME')), |
|
2091 | 2091 | ('f', 'force', None, _('overwrite existing files')), |
|
2092 | 2092 | ('r', 'rev', [], |
|
2093 | 2093 | _('place existing revisions under mq control'), _('REV')), |
|
2094 | 2094 | ('g', 'git', None, _('use git extended diff format')), |
|
2095 | 2095 | ('P', 'push', None, _('qpush after importing'))], |
|
2096 | 2096 | _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')) |
|
2097 | 2097 | def qimport(ui, repo, *filename, **opts): |
|
2098 | 2098 | """import a patch or existing changeset |
|
2099 | 2099 | |
|
2100 | 2100 | The patch is inserted into the series after the last applied |
|
2101 | 2101 | patch. If no patches have been applied, qimport prepends the patch |
|
2102 | 2102 | to the series. |
|
2103 | 2103 | |
|
2104 | 2104 | The patch will have the same name as its source file unless you |
|
2105 | 2105 | give it a new one with -n/--name. |
|
2106 | 2106 | |
|
2107 | 2107 | You can register an existing patch inside the patch directory with |
|
2108 | 2108 | the -e/--existing flag. |
|
2109 | 2109 | |
|
2110 | 2110 | With -f/--force, an existing patch of the same name will be |
|
2111 | 2111 | overwritten. |
|
2112 | 2112 | |
|
2113 | 2113 | An existing changeset may be placed under mq control with -r/--rev |
|
2114 | 2114 | (e.g. qimport --rev tip -n patch will place tip under mq control). |
|
2115 | 2115 | With -g/--git, patches imported with --rev will use the git diff |
|
2116 | 2116 | format. See the diffs help topic for information on why this is |
|
2117 | 2117 | important for preserving rename/copy information and permission |
|
2118 | 2118 | changes. Use :hg:`qfinish` to remove changesets from mq control. |
|
2119 | 2119 | |
|
2120 | 2120 | To import a patch from standard input, pass - as the patch file. |
|
2121 | 2121 | When importing from standard input, a patch name must be specified |
|
2122 | 2122 | using the --name flag. |
|
2123 | 2123 | |
|
2124 | 2124 | To import an existing patch while renaming it:: |
|
2125 | 2125 | |
|
2126 | 2126 | hg qimport -e existing-patch -n new-name |
|
2127 | 2127 | |
|
2128 | 2128 | Returns 0 if import succeeded. |
|
2129 | 2129 | """ |
|
2130 | 2130 | lock = repo.lock() # because this may move phases
|
2131 | 2131 | try: |
|
2132 | 2132 | q = repo.mq |
|
2133 | 2133 | try: |
|
2134 | 2134 | imported = q.qimport( |
|
2135 | 2135 | repo, filename, patchname=opts.get('name'), |
|
2136 | 2136 | existing=opts.get('existing'), force=opts.get('force'), |
|
2137 | 2137 | rev=opts.get('rev'), git=opts.get('git')) |
|
2138 | 2138 | finally: |
|
2139 | 2139 | q.savedirty() |
|
2140 | 2140 | |
|
2141 | 2141 | |
|
2142 | 2142 | if imported and opts.get('push') and not opts.get('rev'): |
|
2143 | 2143 | return q.push(repo, imported[-1]) |
|
2144 | 2144 | finally: |
|
2145 | 2145 | lock.release() |
|
2146 | 2146 | return 0 |
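
# Example invocations, assembled from the docstring above (patch names are
# illustrative):
#
#   hg qimport fix-bug.patch                  # import a patch file
#   hg qimport -e existing-patch -n new-name  # register and rename an existing patch
#   hg diff | hg qimport -n wip.patch -       # importing from stdin needs --name
#   hg qimport --rev tip -n tip.patch         # place tip under mq control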
|
2147 | 2147 | |
|
2148 | 2148 | def qinit(ui, repo, create): |
|
2149 | 2149 | """initialize a new queue repository |
|
2150 | 2150 | |
|
2151 | 2151 | This command also creates a series file for ordering patches, and |
|
2152 | 2152 | an mq-specific .hgignore file in the queue repository, to exclude |
|
2153 | 2153 | the status and guards files (these contain mostly transient state). |
|
2154 | 2154 | |
|
2155 | 2155 | Returns 0 if initialization succeeded.""" |
|
2156 | 2156 | q = repo.mq |
|
2157 | 2157 | r = q.init(repo, create) |
|
2158 | 2158 | q.savedirty() |
|
2159 | 2159 | if r: |
|
2160 | 2160 | if not os.path.exists(r.wjoin('.hgignore')): |
|
2161 | 2161 | fp = r.wopener('.hgignore', 'w') |
|
2162 | 2162 | fp.write('^\\.hg\n') |
|
2163 | 2163 | fp.write('^\\.mq\n') |
|
2164 | 2164 | fp.write('syntax: glob\n') |
|
2165 | 2165 | fp.write('status\n') |
|
2166 | 2166 | fp.write('guards\n') |
|
2167 | 2167 | fp.close() |
|
2168 | 2168 | if not os.path.exists(r.wjoin('series')): |
|
2169 | 2169 | r.wopener('series', 'w').close() |
|
2170 | 2170 | r[None].add(['.hgignore', 'series']) |
|
2171 | 2171 | commands.add(ui, r) |
|
2172 | 2172 | return 0 |
|
2173 | 2173 | |
|
2174 | 2174 | @command("^qinit", |
|
2175 | 2175 | [('c', 'create-repo', None, _('create queue repository'))], |
|
2176 | 2176 | _('hg qinit [-c]')) |
|
2177 | 2177 | def init(ui, repo, **opts): |
|
2178 | 2178 | """init a new queue repository (DEPRECATED) |
|
2179 | 2179 | |
|
2180 | 2180 | The queue repository is unversioned by default. If |
|
2181 | 2181 | -c/--create-repo is specified, qinit will create a separate nested |
|
2182 | 2182 | repository for patches (qinit -c may also be run later to convert |
|
2183 | 2183 | an unversioned patch repository into a versioned one). You can use |
|
2184 | 2184 | qcommit to commit changes to this queue repository. |
|
2185 | 2185 | |
|
2186 | 2186 | This command is deprecated. Without -c, it's implied by other relevant |
|
2187 | 2187 | commands. With -c, use :hg:`init --mq` instead.""" |
|
2188 | 2188 | return qinit(ui, repo, create=opts.get('create_repo')) |
|
2189 | 2189 | |
|
2190 | 2190 | @command("qclone", |
|
2191 | 2191 | [('', 'pull', None, _('use pull protocol to copy metadata')), |
|
2192 | 2192 | ('U', 'noupdate', None, |
|
2193 | 2193 | _('do not update the new working directories')), |
|
2194 | 2194 | ('', 'uncompressed', None, |
|
2195 | 2195 | _('use uncompressed transfer (fast over LAN)')), |
|
2196 | 2196 | ('p', 'patches', '', |
|
2197 | 2197 | _('location of source patch repository'), _('REPO')), |
|
2198 | 2198 | ] + commands.remoteopts, |
|
2199 | 2199 | _('hg qclone [OPTION]... SOURCE [DEST]')) |
|
2200 | 2200 | def clone(ui, source, dest=None, **opts): |
|
2201 | 2201 | '''clone main and patch repository at same time |
|
2202 | 2202 | |
|
2203 | 2203 | If source is local, destination will have no patches applied. If

2204 | 2204 | source is remote, this command cannot check whether patches are

2205 | 2205 | applied in source, so it cannot guarantee that patches are not

2206 | 2206 | applied in destination. If you clone a remote repository, make sure

2207 | 2207 | beforehand that it has no patches applied.
|
2208 | 2208 | |
|
2209 | 2209 | The source patch repository is looked for in <src>/.hg/patches by

2210 | 2210 | default. Use -p <url> to change it.
|
2211 | 2211 | |
|
2212 | 2212 | The patch directory must be a nested Mercurial repository, as |
|
2213 | 2213 | would be created by :hg:`init --mq`. |
|
2214 | 2214 | |
|
2215 | 2215 | Return 0 on success. |
|
2216 | 2216 | ''' |
|
2217 | 2217 | def patchdir(repo): |
|
2218 | 2218 | """compute a patch repo url from a repo object""" |
|
2219 | 2219 | url = repo.url() |
|
2220 | 2220 | if url.endswith('/'): |
|
2221 | 2221 | url = url[:-1] |
|
2222 | 2222 | return url + '/.hg/patches' |
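
# For example, patchdir maps 'http://host/repo' (with or without a trailing
# slash) to 'http://host/repo/.hg/patches'.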
|
2223 | 2223 | |
|
2224 | 2224 | # main repo (destination and sources) |
|
2225 | 2225 | if dest is None: |
|
2226 | 2226 | dest = hg.defaultdest(source) |
|
2227 | 2227 | sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source)) |
|
2228 | 2228 | |
|
2229 | 2229 | # patches repo (source only) |
|
2230 | 2230 | if opts.get('patches'): |
|
2231 | 2231 | patchespath = ui.expandpath(opts.get('patches')) |
|
2232 | 2232 | else: |
|
2233 | 2233 | patchespath = patchdir(sr) |
|
2234 | 2234 | try: |
|
2235 | 2235 | hg.repository(ui, patchespath) |
|
2236 | 2236 | except error.RepoError: |
|
2237 | 2237 | raise util.Abort(_('versioned patch repository not found' |
|
2238 | 2238 | ' (see init --mq)')) |
|
2239 | 2239 | qbase, destrev = None, None |
|
2240 | 2240 | if sr.local(): |
|
2241 | 2241 | if sr.mq.applied and sr[qbase].phase() != phases.secret: |
|
2242 | 2242 | qbase = sr.mq.applied[0].node |
|
2243 | 2243 | if not hg.islocal(dest): |
|
2244 | 2244 | heads = set(sr.heads()) |
|
2245 | 2245 | destrev = list(heads.difference(sr.heads(qbase))) |
|
2246 | 2246 | destrev.append(sr.changelog.parents(qbase)[0]) |
|
2247 | 2247 | elif sr.capable('lookup'): |
|
2248 | 2248 | try: |
|
2249 | 2249 | qbase = sr.lookup('qbase') |
|
2250 | 2250 | except error.RepoError: |
|
2251 | 2251 | pass |
|
2252 | 2252 | |
|
2253 | 2253 | ui.note(_('cloning main repository\n')) |
|
2254 | 2254 | sr, dr = hg.clone(ui, opts, sr.url(), dest, |
|
2255 | 2255 | pull=opts.get('pull'), |
|
2256 | 2256 | rev=destrev, |
|
2257 | 2257 | update=False, |
|
2258 | 2258 | stream=opts.get('uncompressed')) |
|
2259 | 2259 | |
|
2260 | 2260 | ui.note(_('cloning patch repository\n')) |
|
2261 | 2261 | hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr), |
|
2262 | 2262 | pull=opts.get('pull'), update=not opts.get('noupdate'), |
|
2263 | 2263 | stream=opts.get('uncompressed')) |
|
2264 | 2264 | |
|
2265 | 2265 | if dr.local(): |
|
2266 | 2266 | if qbase: |
|
2267 | 2267 | ui.note(_('stripping applied patches from destination ' |
|
2268 | 2268 | 'repository\n')) |
|
2269 | 2269 | dr.mq.strip(dr, [qbase], update=False, backup=None) |
|
2270 | 2270 | if not opts.get('noupdate'): |
|
2271 | 2271 | ui.note(_('updating destination repository\n')) |
|
2272 | 2272 | hg.update(dr, dr.changelog.tip()) |
|
2273 | 2273 | |
|
2274 | 2274 | @command("qcommit|qci", |
|
2275 | 2275 | commands.table["^commit|ci"][1], |
|
2276 | 2276 | _('hg qcommit [OPTION]... [FILE]...')) |
|
2277 | 2277 | def commit(ui, repo, *pats, **opts): |
|
2278 | 2278 | """commit changes in the queue repository (DEPRECATED) |
|
2279 | 2279 | |
|
2280 | 2280 | This command is deprecated; use :hg:`commit --mq` instead.""" |
|
2281 | 2281 | q = repo.mq |
|
2282 | 2282 | r = q.qrepo() |
|
2283 | 2283 | if not r: |
|
2284 | 2284 | raise util.Abort('no queue repository') |
|
2285 | 2285 | commands.commit(r.ui, r, *pats, **opts) |
|
2286 | 2286 | |
|
2287 | 2287 | @command("qseries", |
|
2288 | 2288 | [('m', 'missing', None, _('print patches not in series')), |
|
2289 | 2289 | ] + seriesopts, |
|
2290 | 2290 | _('hg qseries [-ms]')) |
|
2291 | 2291 | def series(ui, repo, **opts): |
|
2292 | 2292 | """print the entire series file |
|
2293 | 2293 | |
|
2294 | 2294 | Returns 0 on success.""" |
|
2295 | 2295 | repo.mq.qseries(repo, missing=opts.get('missing'), |
|
2296 | 2296 | summary=opts.get('summary')) |
|
2297 | 2297 | return 0 |
|
2298 | 2298 | |
|
2299 | 2299 | @command("qtop", seriesopts, _('hg qtop [-s]')) |
|
2300 | 2300 | def top(ui, repo, **opts): |
|
2301 | 2301 | """print the name of the current patch |
|
2302 | 2302 | |
|
2303 | 2303 | Returns 0 on success.""" |
|
2304 | 2304 | q = repo.mq |
|
2305 | 2305 | t = q.applied and q.seriesend(True) or 0 |
|
2306 | 2306 | if t: |
|
2307 | 2307 | q.qseries(repo, start=t - 1, length=1, status='A', |
|
2308 | 2308 | summary=opts.get('summary')) |
|
2309 | 2309 | else: |
|
2310 | 2310 | ui.write(_("no patches applied\n")) |
|
2311 | 2311 | return 1 |
|
2312 | 2312 | |
|
2313 | 2313 | @command("qnext", seriesopts, _('hg qnext [-s]')) |
|
2314 | 2314 | def next(ui, repo, **opts): |
|
2315 | 2315 | """print the name of the next pushable patch |
|
2316 | 2316 | |
|
2317 | 2317 | Returns 0 on success.""" |
|
2318 | 2318 | q = repo.mq |
|
2319 | 2319 | end = q.seriesend() |
|
2320 | 2320 | if end == len(q.series): |
|
2321 | 2321 | ui.write(_("all patches applied\n")) |
|
2322 | 2322 | return 1 |
|
2323 | 2323 | q.qseries(repo, start=end, length=1, summary=opts.get('summary')) |
|
2324 | 2324 | |
|
2325 | 2325 | @command("qprev", seriesopts, _('hg qprev [-s]')) |
|
2326 | 2326 | def prev(ui, repo, **opts): |
|
2327 | 2327 | """print the name of the preceding applied patch |
|
2328 | 2328 | |
|
2329 | 2329 | Returns 0 on success.""" |
|
2330 | 2330 | q = repo.mq |
|
2331 | 2331 | l = len(q.applied) |
|
2332 | 2332 | if l == 1: |
|
2333 | 2333 | ui.write(_("only one patch applied\n")) |
|
2334 | 2334 | return 1 |
|
2335 | 2335 | if not l: |
|
2336 | 2336 | ui.write(_("no patches applied\n")) |
|
2337 | 2337 | return 1 |
|
2338 | 2338 | idx = q.series.index(q.applied[-2].name) |
|
2339 | 2339 | q.qseries(repo, start=idx, length=1, status='A', |
|
2340 | 2340 | summary=opts.get('summary')) |
|
2341 | 2341 | |
|
2342 | 2342 | def setupheaderopts(ui, opts): |
|
2343 | 2343 | if not opts.get('user') and opts.get('currentuser'): |
|
2344 | 2344 | opts['user'] = ui.username() |
|
2345 | 2345 | if not opts.get('date') and opts.get('currentdate'): |
|
2346 | 2346 | opts['date'] = "%d %d" % util.makedate() |
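
# util.makedate() returns a (unixtime, tzoffset) pair, so the "%d %d" above
# yields Mercurial's internal date format, e.g. "1331577600 0" (the example
# value is illustrative).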
|
2347 | 2347 | |
|
2348 | 2348 | @command("^qnew", |
|
2349 | 2349 | [('e', 'edit', None, _('edit commit message')), |
|
2350 | 2350 | ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')), |
|
2351 | 2351 | ('g', 'git', None, _('use git extended diff format')), |
|
2352 | 2352 | ('U', 'currentuser', None, _('add "From: <current user>" to patch')), |
|
2353 | 2353 | ('u', 'user', '', |
|
2354 | 2354 | _('add "From: <USER>" to patch'), _('USER')), |
|
2355 | 2355 | ('D', 'currentdate', None, _('add "Date: <current date>" to patch')), |
|
2356 | 2356 | ('d', 'date', '', |
|
2357 | 2357 | _('add "Date: <DATE>" to patch'), _('DATE')) |
|
2358 | 2358 | ] + commands.walkopts + commands.commitopts, |
|
2359 | 2359 | _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')) |
|
2360 | 2360 | def new(ui, repo, patch, *args, **opts): |
|
2361 | 2361 | """create a new patch |
|
2362 | 2362 | |
|
2363 | 2363 | qnew creates a new patch on top of the currently-applied patch (if |
|
2364 | 2364 | any). The patch will be initialized with any outstanding changes |
|
2365 | 2365 | in the working directory. You may also use -I/--include, |
|
2366 | 2366 | -X/--exclude, and/or a list of files after the patch name to add |
|
2367 | 2367 | only changes to matching files to the new patch, leaving the rest |
|
2368 | 2368 | as uncommitted modifications. |
|
2369 | 2369 | |
|
2370 | 2370 | -u/--user and -d/--date can be used to set the (given) user and |
|
2371 | 2371 | date, respectively. -U/--currentuser and -D/--currentdate set user |
|
2372 | 2372 | to current user and date to current date. |
|
2373 | 2373 | |
|
2374 | 2374 | -e/--edit, -m/--message or -l/--logfile set the patch header as |
|
2375 | 2375 | well as the commit message. If none is specified, the header is |
|
2376 | 2376 | empty and the commit message is '[mq]: PATCH'. |
|
2377 | 2377 | |
|
2378 | 2378 | Use the -g/--git option to keep the patch in the git extended diff |
|
2379 | 2379 | format. Read the diffs help topic for more information on why this |
|
2380 | 2380 | is important for preserving permission changes and copy/rename |
|
2381 | 2381 | information. |
|
2382 | 2382 | |
|
2383 | 2383 | Returns 0 on successful creation of a new patch. |
|
2384 | 2384 | """ |
|
2385 | 2385 | msg = cmdutil.logmessage(ui, opts) |
|
2386 | 2386 | def getmsg(): |
|
2387 | 2387 | return ui.edit(msg, opts.get('user') or ui.username()) |
|
2388 | 2388 | q = repo.mq |
|
2389 | 2389 | opts['msg'] = msg |
|
2390 | 2390 | if opts.get('edit'): |
|
2391 | 2391 | opts['msg'] = getmsg |
|
2392 | 2392 | else: |
|
2393 | 2393 | opts['msg'] = msg |
|
2394 | 2394 | setupheaderopts(ui, opts) |
|
2395 | 2395 | q.new(repo, patch, *args, **opts) |
|
2396 | 2396 | q.savedirty() |
|
2397 | 2397 | return 0 |
|
2398 | 2398 | |
|
2399 | 2399 | @command("^qrefresh", |
|
2400 | 2400 | [('e', 'edit', None, _('edit commit message')), |
|
2401 | 2401 | ('g', 'git', None, _('use git extended diff format')), |
|
2402 | 2402 | ('s', 'short', None, |
|
2403 | 2403 | _('refresh only files already in the patch and specified files')), |
|
2404 | 2404 | ('U', 'currentuser', None, |
|
2405 | 2405 | _('add/update author field in patch with current user')), |
|
2406 | 2406 | ('u', 'user', '', |
|
2407 | 2407 | _('add/update author field in patch with given user'), _('USER')), |
|
2408 | 2408 | ('D', 'currentdate', None, |
|
2409 | 2409 | _('add/update date field in patch with current date')), |
|
2410 | 2410 | ('d', 'date', '', |
|
2411 | 2411 | _('add/update date field in patch with given date'), _('DATE')) |
|
2412 | 2412 | ] + commands.walkopts + commands.commitopts, |
|
2413 | 2413 | _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')) |
|
2414 | 2414 | def refresh(ui, repo, *pats, **opts): |
|
2415 | 2415 | """update the current patch |
|
2416 | 2416 | |
|
2417 | 2417 | If any file patterns are provided, the refreshed patch will |
|
2418 | 2418 | contain only the modifications that match those patterns; the |
|
2419 | 2419 | remaining modifications will remain in the working directory. |
|
2420 | 2420 | |
|
2421 | 2421 | If -s/--short is specified, files currently included in the patch |
|
2422 | 2422 | will be refreshed just like matched files and remain in the patch. |
|
2423 | 2423 | |
|
2424 | 2424 | If -e/--edit is specified, Mercurial will start your configured editor for |
|
2425 | 2425 | you to enter a message. In case qrefresh fails, you will find a backup of |
|
2426 | 2426 | your message in ``.hg/last-message.txt``. |
|
2427 | 2427 | |
|
2428 | 2428 | hg add/remove/copy/rename work as usual, though you might want to |
|
2429 | 2429 | use git-style patches (-g/--git or [diff] git=1) to track copies |
|
2430 | 2430 | and renames. See the diffs help topic for more information on the |
|
2431 | 2431 | git diff format. |
|
2432 | 2432 | |
|
2433 | 2433 | Returns 0 on success. |
|
2434 | 2434 | """ |
|
2435 | 2435 | q = repo.mq |
|
2436 | 2436 | message = cmdutil.logmessage(ui, opts) |
|
2437 | 2437 | if opts.get('edit'): |
|
2438 | 2438 | if not q.applied: |
|
2439 | 2439 | ui.write(_("no patches applied\n")) |
|
2440 | 2440 | return 1 |
|
2441 | 2441 | if message: |
|
2442 | 2442 | raise util.Abort(_('option "-e" incompatible with "-m" or "-l"')) |
|
2443 | 2443 | patch = q.applied[-1].name |
|
2444 | 2444 | ph = patchheader(q.join(patch), q.plainmode) |
|
2445 | 2445 | message = ui.edit('\n'.join(ph.message), ph.user or ui.username()) |
|
2446 | 2446 | # We don't want to lose the patch message if qrefresh fails (issue2062) |
|
2447 | 2447 | repo.savecommitmessage(message) |
|
2448 | 2448 | setupheaderopts(ui, opts) |
|
2449 | 2449 | wlock = repo.wlock() |
|
2450 | 2450 | try: |
|
2451 | 2451 | ret = q.refresh(repo, pats, msg=message, **opts) |
|
2452 | 2452 | q.savedirty() |
|
2453 | 2453 | return ret |
|
2454 | 2454 | finally: |
|
2455 | 2455 | wlock.release() |
|
2456 | 2456 | |
|
2457 | 2457 | @command("^qdiff", |
|
2458 | 2458 | commands.diffopts + commands.diffopts2 + commands.walkopts, |
|
2459 | 2459 | _('hg qdiff [OPTION]... [FILE]...')) |
|
2460 | 2460 | def diff(ui, repo, *pats, **opts): |
|
2461 | 2461 | """diff of the current patch and subsequent modifications |
|
2462 | 2462 | |
|
2463 | 2463 | Shows a diff which includes the current patch as well as any |
|
2464 | 2464 | changes which have been made in the working directory since the |
|
2465 | 2465 | last refresh (thus showing what the current patch would become |
|
2466 | 2466 | after a qrefresh). |
|
2467 | 2467 | |
|
2468 | 2468 | Use :hg:`diff` if you only want to see the changes made since the |
|
2469 | 2469 | last qrefresh, or :hg:`export qtip` if you want to see changes |
|
2470 | 2470 | made by the current patch without including changes made since the |
|
2471 | 2471 | qrefresh. |
|
2472 | 2472 | |
|
2473 | 2473 | Returns 0 on success. |
|
2474 | 2474 | """ |
|
2475 | 2475 | repo.mq.diff(repo, pats, opts) |
|
2476 | 2476 | return 0 |
|
2477 | 2477 | |
|
2478 | 2478 | @command('qfold', |
|
2479 | 2479 | [('e', 'edit', None, _('edit patch header')), |
|
2480 | 2480 | ('k', 'keep', None, _('keep folded patch files')), |
|
2481 | 2481 | ] + commands.commitopts, |
|
2482 | 2482 | _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')) |
|
2483 | 2483 | def fold(ui, repo, *files, **opts): |
|
2484 | 2484 | """fold the named patches into the current patch |
|
2485 | 2485 | |
|
2486 | 2486 | Patches must not yet be applied. Each patch will be successively |
|
2487 | 2487 | applied to the current patch in the order given. If all the |
|
2488 | 2488 | patches apply successfully, the current patch will be refreshed |
|
2489 | 2489 | with the new cumulative patch, and the folded patches will be |
|
2490 | 2490 | deleted. With -k/--keep, the folded patch files will not be |
|
2491 | 2491 | removed afterwards. |
|
2492 | 2492 | |
|
2493 | 2493 | The header for each folded patch will be concatenated with the |
|
2494 | 2494 | current patch header, separated by a line of ``* * *``. |
|
2495 | 2495 | |
|
2496 | 2496 | Returns 0 on success.""" |
|
2497 | 2497 | q = repo.mq |
|
2498 | 2498 | if not files: |
|
2499 | 2499 | raise util.Abort(_('qfold requires at least one patch name')) |
|
2500 | 2500 | if not q.checktoppatch(repo)[0]: |
|
2501 | 2501 | raise util.Abort(_('no patches applied')) |
|
2502 | 2502 | q.checklocalchanges(repo) |
|
2503 | 2503 | |
|
2504 | 2504 | message = cmdutil.logmessage(ui, opts) |
|
2505 | 2505 | if opts.get('edit'): |
|
2506 | 2506 | if message: |
|
2507 | 2507 | raise util.Abort(_('option "-e" incompatible with "-m" or "-l"')) |
|
2508 | 2508 | |
|
2509 | 2509 | parent = q.lookup('qtip') |
|
2510 | 2510 | patches = [] |
|
2511 | 2511 | messages = [] |
|
2512 | 2512 | for f in files: |
|
2513 | 2513 | p = q.lookup(f) |
|
2514 | 2514 | if p in patches or p == parent: |
|
2515 | 2515 | ui.warn(_('Skipping already folded patch %s\n') % p) |
|
2516 | 2516 | if q.isapplied(p): |
|
2517 | 2517 | raise util.Abort(_('qfold cannot fold already applied patch %s') |
|
2518 | 2518 | % p) |
|
2519 | 2519 | patches.append(p) |
|
2520 | 2520 | |
|
2521 | 2521 | for p in patches: |
|
2522 | 2522 | if not message: |
|
2523 | 2523 | ph = patchheader(q.join(p), q.plainmode) |
|
2524 | 2524 | if ph.message: |
|
2525 | 2525 | messages.append(ph.message) |
|
2526 | 2526 | pf = q.join(p) |
|
2527 | 2527 | (patchsuccess, files, fuzz) = q.patch(repo, pf) |
|
2528 | 2528 | if not patchsuccess: |
|
2529 | 2529 | raise util.Abort(_('error folding patch %s') % p) |
|
2530 | 2530 | |
|
2531 | 2531 | if not message: |
|
2532 | 2532 | ph = patchheader(q.join(parent), q.plainmode) |
|
2533 | 2533 | message, user = ph.message, ph.user |
|
2534 | 2534 | for msg in messages: |
|
2535 | 2535 | message.append('* * *') |
|
2536 | 2536 | message.extend(msg) |
|
2537 | 2537 | message = '\n'.join(message) |
|
2538 | 2538 | |
|
2539 | 2539 | if opts.get('edit'): |
|
2540 | 2540 | message = ui.edit(message, user or ui.username()) |
|
2541 | 2541 | |
|
2542 | 2542 | diffopts = q.patchopts(q.diffopts(), *patches) |
|
2543 | 2543 | wlock = repo.wlock() |
|
2544 | 2544 | try: |
|
2545 | 2545 | q.refresh(repo, msg=message, git=diffopts.git) |
|
2546 | 2546 | q.delete(repo, patches, opts) |
|
2547 | 2547 | q.savedirty() |
|
2548 | 2548 | finally: |
|
2549 | 2549 | wlock.release() |
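
# A small sketch (editor's illustration, not mq code) of how the loop above
# assembles the folded commit message: each folded patch's message is
# appended after a '* * *' separator line.
def _demofoldmessage(parentmsg, foldedmsgs):
    """
    >>> print _demofoldmessage(['fix A'], [['fix B'], ['fix C']])
    fix A
    * * *
    fix B
    * * *
    fix C
    """
    message = list(parentmsg)
    for msg in foldedmsgs:
        message.append('* * *')
        message.extend(msg)
    return '\n'.join(message)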
|
2550 | 2550 | |
|
2551 | 2551 | @command("qgoto", |
|
2552 | 2552 | [('c', 'check', None, _('tolerate non-conflicting local changes')), |
|
2553 | 2553 | ('f', 'force', None, _('overwrite any local changes')), |
|
2554 | 2554 | ('', 'no-backup', None, _('do not save backup copies of files'))], |
|
2555 | 2555 | _('hg qgoto [OPTION]... PATCH')) |
|
2556 | 2556 | def goto(ui, repo, patch, **opts): |
|
2557 | 2557 | '''push or pop patches until named patch is at top of stack |
|
2558 | 2558 | |
|
2559 | 2559 | Returns 0 on success.''' |
|
2560 | 2560 | opts = fixcheckopts(ui, opts) |
|
2561 | 2561 | q = repo.mq |
|
2562 | 2562 | patch = q.lookup(patch) |
|
2563 | 2563 | nobackup = opts.get('no_backup') |
|
2564 | 2564 | check = opts.get('check') |
|
2565 | 2565 | if q.isapplied(patch): |
|
2566 | 2566 | ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup, |
|
2567 | 2567 | check=check) |
|
2568 | 2568 | else: |
|
2569 | 2569 | ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup, |
|
2570 | 2570 | check=check) |
|
2571 | 2571 | q.savedirty() |
|
2572 | 2572 | return ret |
|
2573 | 2573 | |
|
2574 | 2574 | @command("qguard", |
|
2575 | 2575 | [('l', 'list', None, _('list all patches and guards')), |
|
2576 | 2576 | ('n', 'none', None, _('drop all guards'))], |
|
2577 | 2577 | _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')) |
|
2578 | 2578 | def guard(ui, repo, *args, **opts): |
|
2579 | 2579 | '''set or print guards for a patch |
|
2580 | 2580 | |
|
2581 | 2581 | Guards control whether a patch can be pushed. A patch with no |
|
2582 | 2582 | guards is always pushed. A patch with a positive guard ("+foo") is |
|
2583 | 2583 | pushed only if the :hg:`qselect` command has activated it. A patch with |
|
2584 | 2584 | a negative guard ("-foo") is never pushed if the :hg:`qselect` command |
|
2585 | 2585 | has activated it. |
|
2586 | 2586 | |
|
2587 | 2587 | With no arguments, print the currently active guards. |
|
2588 | 2588 | With arguments, set guards for the named patch. |
|
2589 | 2589 | |
|
2590 | 2590 | .. note:: |
|
2591 | 2591 | Specifying negative guards now requires '--'. |
|
2592 | 2592 | |
|
2593 | 2593 | To set guards on another patch:: |
|
2594 | 2594 | |
|
2595 | 2595 | hg qguard other.patch -- +2.6.17 -stable |
|
2596 | 2596 | |
|
2597 | 2597 | Returns 0 on success. |
|
2598 | 2598 | ''' |
|
2599 | 2599 | def status(idx): |
|
2600 | 2600 | guards = q.seriesguards[idx] or ['unguarded'] |
|
2601 | 2601 | if q.series[idx] in applied: |
|
2602 | 2602 | state = 'applied' |
|
2603 | 2603 | elif q.pushable(idx)[0]: |
|
2604 | 2604 | state = 'unapplied' |
|
2605 | 2605 | else: |
|
2606 | 2606 | state = 'guarded' |
|
2607 | 2607 | label = 'qguard.patch qguard.%s qseries.%s' % (state, state) |
|
2608 | 2608 | ui.write('%s: ' % ui.label(q.series[idx], label)) |
|
2609 | 2609 | |
|
2610 | 2610 | for i, guard in enumerate(guards): |
|
2611 | 2611 | if guard.startswith('+'): |
|
2612 | 2612 | ui.write(guard, label='qguard.positive') |
|
2613 | 2613 | elif guard.startswith('-'): |
|
2614 | 2614 | ui.write(guard, label='qguard.negative') |
|
2615 | 2615 | else: |
|
2616 | 2616 | ui.write(guard, label='qguard.unguarded') |
|
2617 | 2617 | if i != len(guards) - 1: |
|
2618 | 2618 | ui.write(' ') |
|
2619 | 2619 | ui.write('\n') |
|
2620 | 2620 | q = repo.mq |
|
2621 | 2621 | applied = set(p.name for p in q.applied) |
|
2622 | 2622 | patch = None |
|
2623 | 2623 | args = list(args) |
|
2624 | 2624 | if opts.get('list'): |
|
2625 | 2625 | if args or opts.get('none'): |
|
2626 | 2626 | raise util.Abort(_('cannot mix -l/--list with options or ' |
|
2627 | 2627 | 'arguments')) |
|
2628 | 2628 | for i in xrange(len(q.series)): |
|
2629 | 2629 | status(i) |
|
2630 | 2630 | return |
|
2631 | 2631 | if not args or args[0][0:1] in '-+': |
|
2632 | 2632 | if not q.applied: |
|
2633 | 2633 | raise util.Abort(_('no patches applied')) |
|
2634 | 2634 | patch = q.applied[-1].name |
|
2635 | 2635 | if patch is None and args[0][0:1] not in '-+': |
|
2636 | 2636 | patch = args.pop(0) |
|
2637 | 2637 | if patch is None: |
|
2638 | 2638 | raise util.Abort(_('no patch to work with')) |
|
2639 | 2639 | if args or opts.get('none'): |
|
2640 | 2640 | idx = q.findseries(patch) |
|
2641 | 2641 | if idx is None: |
|
2642 | 2642 | raise util.Abort(_('no patch named %s') % patch) |
|
2643 | 2643 | q.setguards(idx, args) |
|
2644 | 2644 | q.savedirty() |
|
2645 | 2645 | else: |
|
2646 | 2646 | status(q.series.index(q.lookup(patch))) |
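
# A standalone sketch (editor's illustration; the real logic lives in
# queue.pushable) of the guard semantics described in the docstring above:
# a patch with no guards is always pushable, a matching negative guard
# always blocks, and positive guards require at least one match among the
# currently active guards.
def _demopushable(patchguards, active):
    """
    >>> _demopushable([], ['stable'])
    True
    >>> _demopushable(['-stable'], ['stable'])
    False
    >>> _demopushable(['+stable'], [])
    False
    >>> _demopushable(['+stable'], ['stable'])
    True
    """
    if [g for g in patchguards if g.startswith('-') and g[1:] in active]:
        return False
    pos = [g for g in patchguards if g.startswith('+')]
    return not pos or bool([g for g in pos if g[1:] in active])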
|
2647 | 2647 | |
|
2648 | 2648 | @command("qheader", [], _('hg qheader [PATCH]')) |
|
2649 | 2649 | def header(ui, repo, patch=None): |
|
2650 | 2650 | """print the header of the topmost or specified patch |
|
2651 | 2651 | |
|
2652 | 2652 | Returns 0 on success.""" |
|
2653 | 2653 | q = repo.mq |
|
2654 | 2654 | |
|
2655 | 2655 | if patch: |
|
2656 | 2656 | patch = q.lookup(patch) |
|
2657 | 2657 | else: |
|
2658 | 2658 | if not q.applied: |
|
2659 | 2659 | ui.write(_('no patches applied\n')) |
|
2660 | 2660 | return 1 |
|
2661 | 2661 | patch = q.lookup('qtip') |
|
2662 | 2662 | ph = patchheader(q.join(patch), q.plainmode) |
|
2663 | 2663 | |
|
2664 | 2664 | ui.write('\n'.join(ph.message) + '\n') |
|
2665 | 2665 | |
|
2666 | 2666 | def lastsavename(path): |
|
2667 | 2667 | (directory, base) = os.path.split(path) |
|
2668 | 2668 | names = os.listdir(directory) |
|
2669 | 2669 | namere = re.compile("%s.([0-9]+)" % base) |
|
2670 | 2670 | maxindex = None |
|
2671 | 2671 | maxname = None |
|
2672 | 2672 | for f in names: |
|
2673 | 2673 | m = namere.match(f) |
|
2674 | 2674 | if m: |
|
2675 | 2675 | index = int(m.group(1)) |
|
2676 | 2676 | if maxindex is None or index > maxindex: |
|
2677 | 2677 | maxindex = index |
|
2678 | 2678 | maxname = f |
|
2679 | 2679 | if maxname: |
|
2680 | 2680 | return (os.path.join(directory, maxname), maxindex) |
|
2681 | 2681 | return (None, None) |
|
2682 | 2682 | |
|
2683 | 2683 | def savename(path): |
|
2684 | 2684 | (last, index) = lastsavename(path) |
|
2685 | 2685 | if last is None: |
|
2686 | 2686 | index = 0 |
|
2687 | 2687 | newpath = path + ".%d" % (index + 1) |
|
2688 | 2688 | return newpath |
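
# Illustration of the numbering scheme the two helpers above implement:
# saved queues live next to the original path as path.1, path.2, ..., and
# savename picks one past the highest existing index. A string-only sketch
# (editor's illustration, not mq code):
def _demosavename(path, existingindex=None):
    """
    >>> _demosavename('.hg/patches')
    '.hg/patches.1'
    >>> _demosavename('.hg/patches', existingindex=3)
    '.hg/patches.4'
    """
    if existingindex is None:
        existingindex = 0
    return path + '.%d' % (existingindex + 1)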
|
2689 | 2689 | |
|
2690 | 2690 | @command("^qpush", |
|
2691 | 2691 | [('c', 'check', None, _('tolerate non-conflicting local changes')), |
|
2692 | 2692 | ('f', 'force', None, _('apply on top of local changes')), |
|
2693 | 2693 | ('e', 'exact', None, |
|
2694 | 2694 | _('apply the target patch to its recorded parent')), |
|
2695 | 2695 | ('l', 'list', None, _('list patch name in commit text')), |
|
2696 | 2696 | ('a', 'all', None, _('apply all patches')), |
|
2697 | 2697 | ('m', 'merge', None, _('merge from another queue (DEPRECATED)')), |
|
2698 | 2698 | ('n', 'name', '', |
|
2699 | 2699 | _('merge queue name (DEPRECATED)'), _('NAME')), |
|
2700 | 2700 | ('', 'move', None, |
|
2701 | 2701 | _('reorder patch series and apply only the patch')), |
|
2702 | 2702 | ('', 'no-backup', None, _('do not save backup copies of files'))], |
|
2703 | 2703 | _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]')) |
|
2704 | 2704 | def push(ui, repo, patch=None, **opts): |
|
2705 | 2705 | """push the next patch onto the stack |
|
2706 | 2706 | |
|
2707 | 2707 | By default, abort if the working directory contains uncommitted |
|
2708 | 2708 | changes. With -c/--check, abort only if the uncommitted files |
|
2709 | 2709 | overlap with patched files. With -f/--force, backup and patch over |
|
2710 | 2710 | uncommitted changes. |
|
2711 | 2711 | |
|
2712 | 2712 | Return 0 on success. |
|
2713 | 2713 | """ |
|
2714 | 2714 | q = repo.mq |
|
2715 | 2715 | mergeq = None |
|
2716 | 2716 | |
|
2717 | 2717 | opts = fixcheckopts(ui, opts) |
|
2718 | 2718 | if opts.get('merge'): |
|
2719 | 2719 | if opts.get('name'): |
|
2720 | 2720 | newpath = repo.join(opts.get('name')) |
|
2721 | 2721 | else: |
|
2722 | 2722 | newpath, i = lastsavename(q.path) |
|
2723 | 2723 | if not newpath: |
|
2724 | 2724 | ui.warn(_("no saved queues found, please use -n\n")) |
|
2725 | 2725 | return 1 |
|
2726 | 2726 | mergeq = queue(ui, repo.path, newpath) |
|
2727 | 2727 | ui.warn(_("merging with queue at: %s\n") % mergeq.path) |
|
2728 | 2728 | ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'), |
|
2729 | 2729 | mergeq=mergeq, all=opts.get('all'), move=opts.get('move'), |
|
2730 | 2730 | exact=opts.get('exact'), nobackup=opts.get('no_backup'), |
|
2731 | 2731 | check=opts.get('check')) |
|
2732 | 2732 | return ret |
|
2733 | 2733 | |
|
2734 | 2734 | @command("^qpop", |
|
2735 | 2735 | [('a', 'all', None, _('pop all patches')), |
|
2736 | 2736 | ('n', 'name', '', |
|
2737 | 2737 | _('queue name to pop (DEPRECATED)'), _('NAME')), |
|
2738 | 2738 | ('c', 'check', None, _('tolerate non-conflicting local changes')), |
|
2739 | 2739 | ('f', 'force', None, _('forget any local changes to patched files')), |
|
2740 | 2740 | ('', 'no-backup', None, _('do not save backup copies of files'))], |
|
2741 | 2741 | _('hg qpop [-a] [-f] [PATCH | INDEX]')) |
|
2742 | 2742 | def pop(ui, repo, patch=None, **opts): |
|
2743 | 2743 | """pop the current patch off the stack |
|
2744 | 2744 | |
|
2745 | 2745 | Without argument, pops off the top of the patch stack. If given a |
|
2746 | 2746 | patch name, keeps popping off patches until the named patch is at |
|
2747 | 2747 | the top of the stack. |
|
2748 | 2748 | |
|
2749 | 2749 | By default, abort if the working directory contains uncommitted |
|
2750 | 2750 | changes. With -c/--check, abort only if the uncommitted files |
|
2751 | 2751 | overlap with patched files. With -f/--force, backup and discard |
|
2752 | 2752 | changes made to such files. |
|
2753 | 2753 | |
|
2754 | 2754 | Return 0 on success. |
|
2755 | 2755 | """ |
|
2756 | 2756 | opts = fixcheckopts(ui, opts) |
|
2757 | 2757 | localupdate = True |
|
2758 | 2758 | if opts.get('name'): |
|
2759 | 2759 | q = queue(ui, repo.path, repo.join(opts.get('name'))) |
|
2760 | 2760 | ui.warn(_('using patch queue: %s\n') % q.path) |
|
2761 | 2761 | localupdate = False |
|
2762 | 2762 | else: |
|
2763 | 2763 | q = repo.mq |
|
2764 | 2764 | ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate, |
|
2765 | 2765 | all=opts.get('all'), nobackup=opts.get('no_backup'), |
|
2766 | 2766 | check=opts.get('check')) |
|
2767 | 2767 | q.savedirty() |
|
2768 | 2768 | return ret |
|
2769 | 2769 | |
|
2770 | 2770 | @command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]')) |
|
2771 | 2771 | def rename(ui, repo, patch, name=None, **opts): |
|
2772 | 2772 | """rename a patch |
|
2773 | 2773 | |
|
2774 | 2774 | With one argument, renames the current patch to PATCH1. |
|
2775 | 2775 | With two arguments, renames PATCH1 to PATCH2. |
|
2776 | 2776 | |
|
2777 | 2777 | Returns 0 on success.""" |
|
2778 | 2778 | q = repo.mq |
|
2779 | 2779 | if not name: |
|
2780 | 2780 | name = patch |
|
2781 | 2781 | patch = None |
|
2782 | 2782 | |
|
2783 | 2783 | if patch: |
|
2784 | 2784 | patch = q.lookup(patch) |
|
2785 | 2785 | else: |
|
2786 | 2786 | if not q.applied: |
|
2787 | 2787 | ui.write(_('no patches applied\n')) |
|
2788 | 2788 | return |
|
2789 | 2789 | patch = q.lookup('qtip') |
|
2790 | 2790 | absdest = q.join(name) |
|
2791 | 2791 | if os.path.isdir(absdest): |
|
2792 | 2792 | name = normname(os.path.join(name, os.path.basename(patch))) |
|
2793 | 2793 | absdest = q.join(name) |
|
2794 | 2794 | q.checkpatchname(name) |
|
2795 | 2795 | |
|
2796 | 2796 | ui.note(_('renaming %s to %s\n') % (patch, name)) |
|
2797 | 2797 | i = q.findseries(patch) |
|
2798 | 2798 | guards = q.guard_re.findall(q.fullseries[i]) |
|
2799 | 2799 | q.fullseries[i] = name + ''.join([' #' + g for g in guards]) |
|
2800 | 2800 | q.parseseries() |
|
2801 | 2801 | q.seriesdirty = True |
|
2802 | 2802 | |
|
2803 | 2803 | info = q.isapplied(patch) |
|
2804 | 2804 | if info: |
|
2805 | 2805 | q.applied[info[0]] = statusentry(info[1], name) |
|
2806 | 2806 | q.applieddirty = True |
|
2807 | 2807 | |
|
2808 | 2808 | destdir = os.path.dirname(absdest) |
|
2809 | 2809 | if not os.path.isdir(destdir): |
|
2810 | 2810 | os.makedirs(destdir) |
|
2811 | 2811 | util.rename(q.join(patch), absdest) |
|
2812 | 2812 | r = q.qrepo() |
|
2813 | 2813 | if r and patch in r.dirstate: |
|
2814 | 2814 | wctx = r[None] |
|
2815 | 2815 | wlock = r.wlock() |
|
2816 | 2816 | try: |
|
2817 | 2817 | if r.dirstate[patch] == 'a': |
|
2818 | 2818 | r.dirstate.drop(patch) |
|
2819 | 2819 | r.dirstate.add(name) |
|
2820 | 2820 | else: |
|
2821 | 2821 | wctx.copy(patch, name) |
|
2822 | 2822 | wctx.forget([patch]) |
|
2823 | 2823 | finally: |
|
2824 | 2824 | wlock.release() |
|
2825 | 2825 | |
|
2826 | 2826 | q.savedirty() |
|
2827 | 2827 | |
|
2828 | 2828 | @command("qrestore", |
|
2829 | 2829 | [('d', 'delete', None, _('delete save entry')), |
|
2830 | 2830 | ('u', 'update', None, _('update queue working directory'))], |
|
2831 | 2831 | _('hg qrestore [-d] [-u] REV')) |
|
2832 | 2832 | def restore(ui, repo, rev, **opts): |
|
2833 | 2833 | """restore the queue state saved by a revision (DEPRECATED) |
|
2834 | 2834 | |
|
2835 | 2835 | This command is deprecated; use :hg:`rebase` instead."""
|
2836 | 2836 | rev = repo.lookup(rev) |
|
2837 | 2837 | q = repo.mq |
|
2838 | 2838 | q.restore(repo, rev, delete=opts.get('delete'), |
|
2839 | 2839 | qupdate=opts.get('update')) |
|
2840 | 2840 | q.savedirty() |
|
2841 | 2841 | return 0 |
|
2842 | 2842 | |
|
2843 | 2843 | @command("qsave", |
|
2844 | 2844 | [('c', 'copy', None, _('copy patch directory')), |
|
2845 | 2845 | ('n', 'name', '', |
|
2846 | 2846 | _('copy directory name'), _('NAME')), |
|
2847 | 2847 | ('e', 'empty', None, _('clear queue status file')), |
|
2848 | 2848 | ('f', 'force', None, _('force copy'))] + commands.commitopts, |
|
2849 | 2849 | _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')) |
|
2850 | 2850 | def save(ui, repo, **opts): |
|
2851 | 2851 | """save current queue state (DEPRECATED) |
|
2852 | 2852 | |
|
2853 | 2853 | This command is deprecated; use :hg:`rebase` instead."""
|
2854 | 2854 | q = repo.mq |
|
2855 | 2855 | message = cmdutil.logmessage(ui, opts) |
|
2856 | 2856 | ret = q.save(repo, msg=message) |
|
2857 | 2857 | if ret: |
|
2858 | 2858 | return ret |
|
2859 | 2859 | q.savedirty() # save to .hg/patches before copying |
|
2860 | 2860 | if opts.get('copy'): |
|
2861 | 2861 | path = q.path |
|
2862 | 2862 | if opts.get('name'): |
|
2863 | 2863 | newpath = os.path.join(q.basepath, opts.get('name')) |
|
2864 | 2864 | if os.path.exists(newpath): |
|
2865 | 2865 | if not os.path.isdir(newpath): |
|
2866 | 2866 | raise util.Abort(_('destination %s exists and is not ' |
|
2867 | 2867 | 'a directory') % newpath) |
|
2868 | 2868 | if not opts.get('force'): |
|
2869 | 2869 | raise util.Abort(_('destination %s exists, ' |
|
2870 | 2870 | 'use -f to force') % newpath) |
|
2871 | 2871 | else: |
|
2872 | 2872 | newpath = savename(path) |
|
2873 | 2873 | ui.warn(_("copy %s to %s\n") % (path, newpath)) |
|
2874 | 2874 | util.copyfiles(path, newpath) |
|
2875 | 2875 | if opts.get('empty'): |
|
2876 | 2876 | del q.applied[:] |
|
2877 | 2877 | q.applieddirty = True |
|
2878 | 2878 | q.savedirty() |
|
2879 | 2879 | return 0 |
|
2880 | 2880 | |
|
2881 | 2881 | @command("strip", |
|
2882 | 2882 | [ |
|
2883 | 2883 | ('r', 'rev', [], _('strip specified revision (optional, ' |
|
2884 | 2884 | 'can specify revisions without this ' |
|
2885 | 2885 | 'option)'), _('REV')), |
|
2886 | 2886 | ('f', 'force', None, _('force removal of changesets, discard ' |
|
2887 | 2887 | 'uncommitted changes (no backup)')), |
|
2888 | 2888 | ('b', 'backup', None, _('bundle only changesets with local revision' |
|
2889 | 2889 | ' number greater than REV which are not' |
|
2890 | 2890 | ' descendants of REV (DEPRECATED)')), |
|
2891 | 2891 | ('', 'no-backup', None, _('no backups')), |
|
2892 | 2892 | ('', 'nobackup', None, _('no backups (DEPRECATED)')), |
|
2893 | 2893 | ('n', '', None, _('ignored (DEPRECATED)')), |
|
2894 | 2894 | ('k', 'keep', None, _("do not modify working copy during strip"))], |
|
2895 | 2895 | _('hg strip [-k] [-f] [-n] REV...')) |
|
2896 | 2896 | def strip(ui, repo, *revs, **opts): |
|
2897 | 2897 | """strip changesets and all their descendants from the repository |
|
2898 | 2898 | |
|
2899 | 2899 | The strip command removes the specified changesets and all their |
|
2900 | 2900 | descendants. If the working directory has uncommitted changes, the |
|
2901 | 2901 | operation is aborted unless the --force flag is supplied, in which |
|
2902 | 2902 | case changes will be discarded. |
|
2903 | 2903 | |
|
2904 | 2904 | If a parent of the working directory is stripped, then the working |
|
2905 | 2905 | directory will automatically be updated to the most recent |
|
2906 | 2906 | available ancestor of the stripped parent after the operation |
|
2907 | 2907 | completes. |
|
2908 | 2908 | |
|
2909 | 2909 | Any stripped changesets are stored in ``.hg/strip-backup`` as a |
|
2910 | 2910 | bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can |
|
2911 | 2911 | be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`, |
|
2912 | 2912 | where BUNDLE is the bundle file created by the strip. Note that |
|
2913 | 2913 | the local revision numbers will in general be different after the |
|
2914 | 2914 | restore. |
|
2915 | 2915 | |
|
2916 | 2916 | Use the --no-backup option to discard the backup bundle once the |
|
2917 | 2917 | operation completes. |
|
2918 | 2918 | |
|
2919 | 2919 | Return 0 on success. |
|
2920 | 2920 | """ |
|
2921 | 2921 | backup = 'all' |
|
2922 | 2922 | if opts.get('backup'): |
|
2923 | 2923 | backup = 'strip' |
|
2924 | 2924 | elif opts.get('no_backup') or opts.get('nobackup'): |
|
2925 | 2925 | backup = 'none' |
|
2926 | 2926 | |
|
2927 | 2927 | cl = repo.changelog |
|
2928 | 2928 | revs = list(revs) + opts.get('rev') |
|
2929 | 2929 | revs = set(scmutil.revrange(repo, revs)) |
|
2930 | 2930 | if not revs: |
|
2931 | 2931 | raise util.Abort(_('empty revision set')) |
|
2932 | 2932 | |
|
2933 | 2933 | descendants = set(cl.descendants(*revs)) |
|
2934 | 2934 | strippedrevs = revs.union(descendants) |
|
2935 | 2935 | roots = revs.difference(descendants) |
|
2936 | 2936 | |
|
2937 | 2937 | update = False |
|
2938 | 2938 | # if one of the wdir parent is stripped we'll need |
|
2939 | 2939 | # to update away to an earlier revision |
|
2940 | 2940 | for p in repo.dirstate.parents(): |
|
2941 | 2941 | if p != nullid and cl.rev(p) in strippedrevs: |
|
2942 | 2942 | update = True |
|
2943 | 2943 | break |
|
2944 | 2944 | |
|
2945 | 2945 | rootnodes = set(cl.node(r) for r in roots) |
|
2946 | 2946 | |
|
2947 | 2947 | q = repo.mq |
|
2948 | 2948 | if q.applied: |
|
2949 | 2949 | # refresh queue state if we're about to strip |
|
2950 | 2950 | # applied patches |
|
2951 | 2951 | if cl.rev(repo.lookup('qtip')) in strippedrevs: |
|
2952 | 2952 | q.applieddirty = True |
|
2953 | 2953 | start = 0 |
|
2954 | 2954 | end = len(q.applied) |
|
2955 | 2955 | for i, statusentry in enumerate(q.applied): |
|
2956 | 2956 | if statusentry.node in rootnodes: |
|
2957 | 2957 | # if one of the stripped roots is an applied |
|
2958 | 2958 | # patch, only part of the queue is stripped |
|
2959 | 2959 | start = i |
|
2960 | 2960 | break |
|
2961 | 2961 | del q.applied[start:end] |
|
2962 | 2962 | q.savedirty() |
|
2963 | 2963 | |
|
2964 | 2964 | revs = list(rootnodes) |
|
2965 | 2965 | if update and opts.get('keep'): |
|
2966 | 2966 | wlock = repo.wlock() |
|
2967 | 2967 | try: |
|
2968 | 2968 | urev = repo.mq.qparents(repo, revs[0]) |
|
2969 | 2969 | repo.dirstate.rebuild(urev, repo[urev].manifest()) |
|
2970 | 2970 | repo.dirstate.write() |
|
2971 | 2971 | update = False |
|
2972 | 2972 | finally: |
|
2973 | 2973 | wlock.release() |
|
2974 | 2974 | |
|
2975 | 2975 | repo.mq.strip(repo, revs, backup=backup, update=update, |
|
2976 | 2976 | force=opts.get('force')) |
|
2977 | 2977 | return 0 |
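
# The set arithmetic above in one self-contained sketch (editor's
# illustration): everything stripped is the requested revs plus their
# descendants, and the roots are the requested revs that are not themselves
# descendants of another requested rev.
def _demostriproots(revs, descendants):
    """
    >>> stripped, roots = _demostriproots(set([3, 5]), set([4, 5, 6]))
    >>> sorted(stripped), sorted(roots)
    ([3, 4, 5, 6], [3])
    """
    return revs.union(descendants), revs.difference(descendants)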
|
2978 | 2978 | |
|
2979 | 2979 | @command("qselect", |
|
2980 | 2980 | [('n', 'none', None, _('disable all guards')), |
|
2981 | 2981 | ('s', 'series', None, _('list all guards in series file')), |
|
2982 | 2982 | ('', 'pop', None, _('pop to before first guarded applied patch')), |
|
2983 | 2983 | ('', 'reapply', None, _('pop, then reapply patches'))], |
|
2984 | 2984 | _('hg qselect [OPTION]... [GUARD]...')) |
|
2985 | 2985 | def select(ui, repo, *args, **opts): |
|
2986 | 2986 | '''set or print guarded patches to push |
|
2987 | 2987 | |
|
2988 | 2988 | Use the :hg:`qguard` command to set or print guards on a patch, then use
|
2989 | 2989 | qselect to tell mq which guards to use. A patch will be pushed if |
|
2990 | 2990 | it has no guards or any positive guards match the currently |
|
2991 | 2991 | selected guard, but will not be pushed if any negative guards |
|
2992 | 2992 | match the current guard. For example:: |
|
2993 | 2993 | |
|
2994 | 2994 | qguard foo.patch -- -stable (negative guard) |
|
2995 | 2995 | qguard bar.patch +stable (positive guard) |
|
2996 | 2996 | qselect stable |
|
2997 | 2997 | |
|
2998 | 2998 | This activates the "stable" guard. mq will skip foo.patch (because |
|
2999 | 2999 | it has a negative match) but push bar.patch (because it has a |
|
3000 | 3000 | positive match). |
|
3001 | 3001 | |
|
3002 | 3002 | With no arguments, prints the currently active guards. |
|
3003 | 3003 | With one argument, sets the active guard. |
|
3004 | 3004 | |
|
3005 | 3005 | Use -n/--none to deactivate guards (no other arguments needed). |
|
3006 | 3006 | When no guards are active, patches with positive guards are |
|
3007 | 3007 | skipped and patches with negative guards are pushed. |
|
3008 | 3008 | |
|
3009 | 3009 | qselect can change the guards on applied patches. It does not pop |
|
3010 | 3010 | guarded patches by default. Use --pop to pop back to the last |
|
3011 | 3011 | applied patch that is not guarded. Use --reapply (which implies |
|
3012 | 3012 | --pop) to push back to the current patch afterwards, but skip |
|
3013 | 3013 | guarded patches. |
|
3014 | 3014 | |
|
3015 | 3015 | Use -s/--series to print a list of all guards in the series file |
|
3016 | 3016 | (no other arguments needed). Use -v for more information. |
|
3017 | 3017 | |
|
3018 | 3018 | Returns 0 on success.''' |
|
3019 | 3019 | |
|
3020 | 3020 | q = repo.mq |
|
3021 | 3021 | guards = q.active() |
|
3022 | 3022 | if args or opts.get('none'): |
|
3023 | 3023 | old_unapplied = q.unapplied(repo) |
|
3024 | 3024 | old_guarded = [i for i in xrange(len(q.applied)) if |
|
3025 | 3025 | not q.pushable(i)[0]] |
|
3026 | 3026 | q.setactive(args) |
|
3027 | 3027 | q.savedirty() |
|
3028 | 3028 | if not args: |
|
3029 | 3029 | ui.status(_('guards deactivated\n')) |
|
3030 | 3030 | if not opts.get('pop') and not opts.get('reapply'): |
|
3031 | 3031 | unapplied = q.unapplied(repo) |
|
3032 | 3032 | guarded = [i for i in xrange(len(q.applied)) |
|
3033 | 3033 | if not q.pushable(i)[0]] |
|
3034 | 3034 | if len(unapplied) != len(old_unapplied): |
|
3035 | 3035 | ui.status(_('number of unguarded, unapplied patches has ' |
|
3036 | 3036 | 'changed from %d to %d\n') % |
|
3037 | 3037 | (len(old_unapplied), len(unapplied))) |
|
3038 | 3038 | if len(guarded) != len(old_guarded): |
|
3039 | 3039 | ui.status(_('number of guarded, applied patches has changed ' |
|
3040 | 3040 | 'from %d to %d\n') % |
|
3041 | 3041 | (len(old_guarded), len(guarded))) |
|
3042 | 3042 | elif opts.get('series'): |
|
3043 | 3043 | guards = {} |
|
3044 | 3044 | noguards = 0 |
|
3045 | 3045 | for gs in q.seriesguards: |
|
3046 | 3046 | if not gs: |
|
3047 | 3047 | noguards += 1 |
|
3048 | 3048 | for g in gs: |
|
3049 | 3049 | guards.setdefault(g, 0) |
|
3050 | 3050 | guards[g] += 1 |
|
3051 | 3051 | if ui.verbose: |
|
3052 | 3052 | guards['NONE'] = noguards |
|
3053 | 3053 | guards = guards.items() |
|
3054 | 3054 | guards.sort(key=lambda x: x[0][1:]) |
|
3055 | 3055 | if guards: |
|
3056 | 3056 | ui.note(_('guards in series file:\n')) |
|
3057 | 3057 | for guard, count in guards: |
|
3058 | 3058 | ui.note('%2d ' % count) |
|
3059 | 3059 | ui.write(guard, '\n') |
|
3060 | 3060 | else: |
|
3061 | 3061 | ui.note(_('no guards in series file\n')) |
|
3062 | 3062 | else: |
|
3063 | 3063 | if guards: |
|
3064 | 3064 | ui.note(_('active guards:\n')) |
|
3065 | 3065 | for g in guards: |
|
3066 | 3066 | ui.write(g, '\n') |
|
3067 | 3067 | else: |
|
3068 | 3068 | ui.write(_('no active guards\n')) |
|
3069 | 3069 | reapply = opts.get('reapply') and q.applied and q.appliedname(-1) |
|
3070 | 3070 | popped = False |
|
3071 | 3071 | if opts.get('pop') or opts.get('reapply'): |
|
3072 | 3072 | for i in xrange(len(q.applied)): |
|
3073 | 3073 | pushable, reason = q.pushable(i) |
|
3074 | 3074 | if not pushable: |
|
3075 | 3075 | ui.status(_('popping guarded patches\n')) |
|
3076 | 3076 | popped = True |
|
3077 | 3077 | if i == 0: |
|
3078 | 3078 | q.pop(repo, all=True) |
|
3079 | 3079 | else: |
|
3080 | 3080 | q.pop(repo, str(i - 1)) |
|
3081 | 3081 | break |
|
3082 | 3082 | if popped: |
|
3083 | 3083 | try: |
|
3084 | 3084 | if reapply: |
|
3085 | 3085 | ui.status(_('reapplying unguarded patches\n')) |
|
3086 | 3086 | q.push(repo, reapply) |
|
3087 | 3087 | finally: |
|
3088 | 3088 | q.savedirty() |
|
3089 | 3089 | |
|
3090 | 3090 | @command("qfinish", |
|
3091 | 3091 | [('a', 'applied', None, _('finish all applied changesets'))], |
|
3092 | 3092 | _('hg qfinish [-a] [REV]...')) |
|
3093 | 3093 | def finish(ui, repo, *revrange, **opts): |
|
3094 | 3094 | """move applied patches into repository history |
|
3095 | 3095 | |
|
3096 | 3096 | Finishes the specified revisions (corresponding to applied |
|
3097 | 3097 | patches) by moving them out of mq control into regular repository |
|
3098 | 3098 | history. |
|
3099 | 3099 | |
|
3100 | 3100 | Accepts a revision range or the -a/--applied option. If --applied |
|
3101 | 3101 | is specified, all applied mq revisions are removed from mq |
|
3102 | 3102 | control. Otherwise, the given revisions must be at the base of the |
|
3103 | 3103 | stack of applied patches. |
|
3104 | 3104 | |
|
3105 | 3105 | This can be especially useful if your changes have been applied to |
|
3106 | 3106 | an upstream repository, or if you are about to push your changes |
|
3107 | 3107 | to upstream. |
|
3108 | 3108 | |
|
3109 | 3109 | Returns 0 on success. |
|
3110 | 3110 | """ |
|
3111 | 3111 | if not opts.get('applied') and not revrange: |
|
3112 | 3112 | raise util.Abort(_('no revisions specified')) |
|
3113 | 3113 | elif opts.get('applied'): |
|
3114 | 3114 | revrange = ('qbase::qtip',) + revrange |
|
3115 | 3115 | |
|
3116 | 3116 | q = repo.mq |
|
3117 | 3117 | if not q.applied: |
|
3118 | 3118 | ui.status(_('no patches applied\n')) |
|
3119 | 3119 | return 0 |
|
3120 | 3120 | |
|
3121 | 3121 | revs = scmutil.revrange(repo, revrange) |
|
3122 | 3122 | if repo['.'].rev() in revs and repo[None].files(): |
|
3123 | 3123 | ui.warn(_('warning: uncommitted changes in the working directory\n')) |
|
3124 | 3124 | # queue.finish may change phases but leaves the responsibility to lock the

3125 | 3125 | # repo to the caller to avoid a deadlock with wlock. This command code is

3126 | 3126 | # responsible for this locking.
|
3127 | 3127 | lock = repo.lock() |
|
3128 | 3128 | try: |
|
3129 | 3129 | q.finish(repo, revs) |
|
3130 | 3130 | q.savedirty() |
|
3131 | 3131 | finally: |
|
3132 | 3132 | lock.release() |
|
3133 | 3133 | return 0 |
|
3134 | 3134 | |
|
3135 | 3135 | @command("qqueue", |
|
3136 | 3136 | [('l', 'list', False, _('list all available queues')), |
|
3137 | 3137 | ('', 'active', False, _('print name of active queue')), |
|
3138 | 3138 | ('c', 'create', False, _('create new queue')), |
|
3139 | 3139 | ('', 'rename', False, _('rename active queue')), |
|
3140 | 3140 | ('', 'delete', False, _('delete reference to queue')), |
|
3141 | 3141 | ('', 'purge', False, _('delete queue, and remove patch dir')), |
|
3142 | 3142 | ], |
|
3143 | 3143 | _('[OPTION] [QUEUE]')) |
|
3144 | 3144 | def qqueue(ui, repo, name=None, **opts): |
|
3145 | 3145 | '''manage multiple patch queues |
|
3146 | 3146 | |
|
3147 | 3147 | Supports switching between different patch queues, as well as creating |
|
3148 | 3148 | new patch queues and deleting existing ones. |
|
3149 | 3149 | |
|
3150 | 3150 | Omitting a queue name or specifying -l/--list will show you the registered |
|
3151 | 3151 | queues - by default the "normal" patches queue is registered. The currently |
|
3152 | 3152 | active queue will be marked with "(active)". Specifying --active will print |
|
3153 | 3153 | only the name of the active queue. |
|
3154 | 3154 | |
|
3155 | 3155 | To create a new queue, use -c/--create. The queue is automatically made |
|
3156 | 3156 | active, except when there are applied patches from the currently

3157 | 3157 | active queue in the repository. In that case, the queue will only be

3158 | 3158 | created, and switching will fail.
|
3159 | 3159 | |
|
3160 | 3160 | To delete an existing queue, use --delete. You cannot delete the currently |
|
3161 | 3161 | active queue. |
|
3162 | 3162 | |
|
3163 | 3163 | Returns 0 on success. |
|
3164 | 3164 | ''' |
|
3165 | 3165 | q = repo.mq |
|
3166 | 3166 | _defaultqueue = 'patches' |
|
3167 | 3167 | _allqueues = 'patches.queues' |
|
3168 | 3168 | _activequeue = 'patches.queue' |
|
3169 | 3169 | |
|
3170 | 3170 | def _getcurrent(): |
|
3171 | 3171 | cur = os.path.basename(q.path) |
|
3172 | 3172 | if cur.startswith('patches-'): |
|
3173 | 3173 | cur = cur[8:] |
|
3174 | 3174 | return cur |
|
3175 | 3175 | |
|
3176 | 3176 | def _noqueues(): |
|
3177 | 3177 | try: |
|
3178 | 3178 | fh = repo.opener(_allqueues, 'r') |
|
3179 | 3179 | fh.close() |
|
3180 | 3180 | except IOError: |
|
3181 | 3181 | return True |
|
3182 | 3182 | |
|
3183 | 3183 | return False |
|
3184 | 3184 | |
|
3185 | 3185 | def _getqueues(): |
|
3186 | 3186 | current = _getcurrent() |
|
3187 | 3187 | |
|
3188 | 3188 | try: |
|
3189 | 3189 | fh = repo.opener(_allqueues, 'r') |
|
3190 | 3190 | queues = [queue.strip() for queue in fh if queue.strip()] |
|
3191 | 3191 | fh.close() |
|
3192 | 3192 | if current not in queues: |
|
3193 | 3193 | queues.append(current) |
|
3194 | 3194 | except IOError: |
|
3195 | 3195 | queues = [_defaultqueue] |
|
3196 | 3196 | |
|
3197 | 3197 | return sorted(queues) |
|
3198 | 3198 | |
|
3199 | 3199 | def _setactive(name): |
|
3200 | 3200 | if q.applied: |
|
3201 | 3201 | raise util.Abort(_('patches applied - cannot set new queue active')) |
|
3202 | 3202 | _setactivenocheck(name) |
|
3203 | 3203 | |
|
3204 | 3204 | def _setactivenocheck(name): |
|
3205 | 3205 | fh = repo.opener(_activequeue, 'w') |
|
3206 | 3206 | if name != 'patches': |
|
3207 | 3207 | fh.write(name) |
|
3208 | 3208 | fh.close() |
|
3209 | 3209 | |
|
3210 | 3210 | def _addqueue(name): |
|
3211 | 3211 | fh = repo.opener(_allqueues, 'a') |
|
3212 | 3212 | fh.write('%s\n' % (name,)) |
|
3213 | 3213 | fh.close() |
|
3214 | 3214 | |
|
3215 | 3215 | def _queuedir(name): |
|
3216 | 3216 | if name == 'patches': |
|
3217 | 3217 | return repo.join('patches') |
|
3218 | 3218 | else: |
|
3219 | 3219 | return repo.join('patches-' + name) |
|
3220 | 3220 | |
|
3221 | 3221 | def _validname(name): |
|
3222 | 3222 | for n in name: |
|
3223 | 3223 | if n in ':\\/.': |
|
3224 | 3224 | return False |
|
3225 | 3225 | return True |
|
3226 | 3226 | |
|
3227 | 3227 | def _delete(name): |
|
3228 | 3228 | if name not in existing: |
|
3229 | 3229 | raise util.Abort(_('cannot delete queue that does not exist')) |
|
3230 | 3230 | |
|
3231 | 3231 | current = _getcurrent() |
|
3232 | 3232 | |
|
3233 | 3233 | if name == current: |
|
3234 | 3234 | raise util.Abort(_('cannot delete currently active queue')) |
|
3235 | 3235 | |
|
3236 | 3236 | fh = repo.opener('patches.queues.new', 'w') |
|
3237 | 3237 | for queue in existing: |
|
3238 | 3238 | if queue == name: |
|
3239 | 3239 | continue |
|
3240 | 3240 | fh.write('%s\n' % (queue,)) |
|
3241 | 3241 | fh.close() |
|
3242 | 3242 | util.rename(repo.join('patches.queues.new'), repo.join(_allqueues)) |
|
3243 | 3243 | |
|
3244 | 3244 | if not name or opts.get('list') or opts.get('active'): |
|
3245 | 3245 | current = _getcurrent() |
|
3246 | 3246 | if opts.get('active'): |
|
3247 | 3247 | ui.write('%s\n' % (current,)) |
|
3248 | 3248 | return |
|
3249 | 3249 | for queue in _getqueues(): |
|
3250 | 3250 | ui.write('%s' % (queue,)) |
|
3251 | 3251 | if queue == current and not ui.quiet: |
|
3252 | 3252 | ui.write(_(' (active)\n')) |
|
3253 | 3253 | else: |
|
3254 | 3254 | ui.write('\n') |
|
3255 | 3255 | return |
|
3256 | 3256 | |
|
3257 | 3257 | if not _validname(name): |
|
3258 | 3258 | raise util.Abort( |
|
3259 | 3259 | _('invalid queue name, may not contain the characters ":\\/."')) |
|
3260 | 3260 | |
|
3261 | 3261 | existing = _getqueues() |
|
3262 | 3262 | |
|
3263 | 3263 | if opts.get('create'): |
|
3264 | 3264 | if name in existing: |
|
3265 | 3265 | raise util.Abort(_('queue "%s" already exists') % name) |
|
3266 | 3266 | if _noqueues(): |
|
3267 | 3267 | _addqueue(_defaultqueue) |
|
3268 | 3268 | _addqueue(name) |
|
3269 | 3269 | _setactive(name) |
|
3270 | 3270 | elif opts.get('rename'): |
|
3271 | 3271 | current = _getcurrent() |
|
3272 | 3272 | if name == current: |
|
3273 | 3273 | raise util.Abort(_('can\'t rename "%s" to its current name') % name) |
|
3274 | 3274 | if name in existing: |
|
3275 | 3275 | raise util.Abort(_('queue "%s" already exists') % name) |
|
3276 | 3276 | |
|
3277 | 3277 | olddir = _queuedir(current) |
|
3278 | 3278 | newdir = _queuedir(name) |
|
3279 | 3279 | |
|
3280 | 3280 | if os.path.exists(newdir): |
|
3281 | 3281 | raise util.Abort(_('non-queue directory "%s" already exists') % |
|
3282 | 3282 | newdir) |
|
3283 | 3283 | |
|
3284 | 3284 | fh = repo.opener('patches.queues.new', 'w') |
|
3285 | 3285 | for queue in existing: |
|
3286 | 3286 | if queue == current: |
|
3287 | 3287 | fh.write('%s\n' % (name,)) |
|
3288 | 3288 | if os.path.exists(olddir): |
|
3289 | 3289 | util.rename(olddir, newdir) |
|
3290 | 3290 | else: |
|
3291 | 3291 | fh.write('%s\n' % (queue,)) |
|
3292 | 3292 | fh.close() |
|
3293 | 3293 | util.rename(repo.join('patches.queues.new'), repo.join(_allqueues)) |
|
3294 | 3294 | _setactivenocheck(name) |
|
3295 | 3295 | elif opts.get('delete'): |
|
3296 | 3296 | _delete(name) |
|
3297 | 3297 | elif opts.get('purge'): |
|
3298 | 3298 | if name in existing: |
|
3299 | 3299 | _delete(name) |
|
3300 | 3300 | qdir = _queuedir(name) |
|
3301 | 3301 | if os.path.exists(qdir): |
|
3302 | 3302 | shutil.rmtree(qdir) |
|
3303 | 3303 | else: |
|
3304 | 3304 | if name not in existing: |
|
3305 | 3305 | raise util.Abort(_('use --create to create a new queue')) |
|
3306 | 3306 | _setactive(name) |
|
3307 | 3307 | |
|
3308 | 3308 | def mqphasedefaults(repo, roots): |
|
3309 | 3309 | """callback used to set mq changeset as secret when no phase data exists""" |
|
3310 | 3310 | if repo.mq.applied: |
|
3311 | 3311 | if repo.ui.configbool('mq', 'secret', False): |
|
3312 | 3312 | mqphase = phases.secret |
|
3313 | 3313 | else: |
|
3314 | 3314 | mqphase = phases.draft |
|
3315 | 3315 | qbase = repo[repo.mq.applied[0].node] |
|
3316 | 3316 | roots[mqphase].add(qbase.node()) |
|
3317 | 3317 | return roots |
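# Example: this callback is registered via repo._phasedefaults in
# reposetup() below. With the following hgrc setting, applied mq
# changesets default to the secret phase instead of draft:
#   [mq]
#   secret = True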
|
3318 | 3318 | |
|
3319 | 3319 | def reposetup(ui, repo): |
|
3320 | 3320 | class mqrepo(repo.__class__): |
|
3321 | 3321 | @util.propertycache |
|
3322 | 3322 | def mq(self): |
|
3323 | 3323 | return queue(self.ui, self.path) |
|
3324 | 3324 | |
|
3325 | 3325 | def abortifwdirpatched(self, errmsg, force=False): |
|
3326 | 3326 | if self.mq.applied and not force: |
|
3327 | 3327 | parents = self.dirstate.parents() |
|
3328 | 3328 | patches = [s.node for s in self.mq.applied] |
|
3329 | 3329 | if parents[0] in patches or parents[1] in patches: |
|
3330 | 3330 | raise util.Abort(errmsg) |
|
3331 | 3331 | |
|
3332 | 3332 | def commit(self, text="", user=None, date=None, match=None, |
|
3333 | 3333 | force=False, editor=False, extra={}): |
|
3334 | 3334 | self.abortifwdirpatched( |
|
3335 | 3335 | _('cannot commit over an applied mq patch'), |
|
3336 | 3336 | force) |
|
3337 | 3337 | |
|
3338 | 3338 | return super(mqrepo, self).commit(text, user, date, match, force, |
|
3339 | 3339 | editor, extra) |
|
3340 | 3340 | |
|
3341 | 3341 | def checkpush(self, force, revs): |
|
3342 | 3342 | if self.mq.applied and not force: |
|
3343 | 3343 | outapplied = [e.node for e in self.mq.applied] |
|
3344 | 3344 | if revs: |
|
3345 | 3345 | # Assume applied patches have no non-patch descendants and |
|
3346 | 3346 | # are not already on the remote. Filter out any changeset |
|
3347 | 3347 | # that is not being pushed. |
|
3348 | 3348 | heads = set(revs) |
|
3349 | 3349 | for node in reversed(outapplied): |
|
3350 | 3350 | if node in heads: |
|
3351 | 3351 | break |
|
3352 | 3352 | else: |
|
3353 | 3353 | outapplied.pop() |
|
3354 | 3354 | # looking for pushed and shared changeset |
|
3355 | 3355 | for node in outapplied: |
|
3356 | 3356 | if repo[node].phase() < phases.secret: |
|
3357 | 3357 | raise util.Abort(_('source has mq patches applied')) |
|
3358 | 3358 | # no non-secret patches pushed |
|
3359 | 3359 | super(mqrepo, self).checkpush(force, revs) |
|
3360 | 3360 | |
|
3361 | 3361 | def _findtags(self): |
|
3362 | 3362 | '''augment tags from base class with patch tags''' |
|
3363 | 3363 | result = super(mqrepo, self)._findtags() |
|
3364 | 3364 | |
|
3365 | 3365 | q = self.mq |
|
3366 | 3366 | if not q.applied: |
|
3367 | 3367 | return result |
|
3368 | 3368 | |
|
3369 | 3369 | mqtags = [(patch.node, patch.name) for patch in q.applied] |
|
3370 | 3370 | |
|
3371 | 3371 | try: |
|
3372 | 3372 | self.changelog.rev(mqtags[-1][0]) |
|
3373 | 3373 | except error.LookupError: |
|
3374 | 3374 | self.ui.warn(_('mq status file refers to unknown node %s\n') |
|
3375 | 3375 | % short(mqtags[-1][0])) |
|
3376 | 3376 | return result |
|
3377 | 3377 | |
|
3378 | 3378 | mqtags.append((mqtags[-1][0], 'qtip')) |
|
3379 | 3379 | mqtags.append((mqtags[0][0], 'qbase')) |
|
3380 | 3380 | mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent')) |
|
3381 | 3381 | tags = result[0] |
|
3382 | 3382 | for patch in mqtags: |
|
3383 | 3383 | if patch[1] in tags: |
|
3384 | 3384 | self.ui.warn(_('Tag %s overrides mq patch of the same ' |
|
3385 | 3385 | 'name\n') % patch[1]) |
|
3386 | 3386 | else: |
|
3387 | 3387 | tags[patch[1]] = patch[0] |
|
3388 | 3388 | |
|
3389 | 3389 | return result |
|
3390 | 3390 | |
|
3391 | 3391 | def _branchtags(self, partial, lrev): |
|
3392 | 3392 | q = self.mq |
|
3393 | 3393 | cl = self.changelog |
|
3394 | 3394 | qbase = None |
|
3395 | 3395 | if not q.applied: |
|
3396 | 3396 | if getattr(self, '_committingpatch', False): |
|
3397 | 3397 | # Committing a new patch, must be tip |
|
3398 | 3398 | qbase = len(cl) - 1 |
|
3399 | 3399 | else: |
|
3400 | 3400 | qbasenode = q.applied[0].node |
|
3401 | 3401 | try: |
|
3402 | 3402 | qbase = cl.rev(qbasenode) |
|
3403 | 3403 | except error.LookupError: |
|
3404 | 3404 | self.ui.warn(_('mq status file refers to unknown node %s\n') |
|
3405 | 3405 | % short(qbasenode)) |
|
3406 | 3406 | if qbase is None: |
|
3407 | 3407 | return super(mqrepo, self)._branchtags(partial, lrev) |
|
3408 | 3408 | |
|
3409 | 3409 | start = lrev + 1 |
|
3410 | 3410 | if start < qbase: |
|
3411 | 3411 | # update the cache (excluding the patches) and save it |
|
3412 | 3412 | ctxgen = (self[r] for r in xrange(lrev + 1, qbase)) |
|
3413 | 3413 | self._updatebranchcache(partial, ctxgen) |
|
3414 | 3414 | self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1) |
|
3415 | 3415 | start = qbase |
|
3416 | 3416 | # if start = qbase, the cache is as updated as it should be. |
|
3417 | 3417 | # if start > qbase, the cache includes (part of) the patches. |
|
3418 | 3418 | # we might as well use it, but we won't save it. |
|
3419 | 3419 | |
|
3420 | 3420 | # update the cache up to the tip |
|
3421 | 3421 | ctxgen = (self[r] for r in xrange(start, len(cl))) |
|
3422 | 3422 | self._updatebranchcache(partial, ctxgen) |
|
3423 | 3423 | |
|
3424 | 3424 | return partial |
|
3425 | 3425 | |
|
3426 | 3426 | if repo.local(): |
|
3427 | 3427 | repo.__class__ = mqrepo |
|
3428 | 3428 | |
|
3429 | 3429 | repo._phasedefaults.append(mqphasedefaults) |
|
3430 | 3430 | |
|
3431 | 3431 | def mqimport(orig, ui, repo, *args, **kwargs): |
|
3432 | 3432 | if (util.safehasattr(repo, 'abortifwdirpatched') |
|
3433 | 3433 | and not kwargs.get('no_commit', False)): |
|
3434 | 3434 | repo.abortifwdirpatched(_('cannot import over an applied patch'), |
|
3435 | 3435 | kwargs.get('force')) |
|
3436 | 3436 | return orig(ui, repo, *args, **kwargs) |
|
3437 | 3437 | |
|
3438 | 3438 | def mqinit(orig, ui, *args, **kwargs): |
|
3439 | 3439 | mq = kwargs.pop('mq', None) |
|
3440 | 3440 | |
|
3441 | 3441 | if not mq: |
|
3442 | 3442 | return orig(ui, *args, **kwargs) |
|
3443 | 3443 | |
|
3444 | 3444 | if args: |
|
3445 | 3445 | repopath = args[0] |
|
3446 | 3446 | if not hg.islocal(repopath): |
|
3447 | 3447 | raise util.Abort(_('only a local queue repository ' |
|
3448 | 3448 | 'may be initialized')) |
|
3449 | 3449 | else: |
|
3450 | 3450 | repopath = cmdutil.findrepo(os.getcwd()) |
|
3451 | 3451 | if not repopath: |
|
3452 | 3452 | raise util.Abort(_('there is no Mercurial repository here ' |
|
3453 | 3453 | '(.hg not found)')) |
|
3454 | 3454 | repo = hg.repository(ui, repopath) |
|
3455 | 3455 | return qinit(ui, repo, True) |
|
3456 | 3456 | |
|
3457 | 3457 | def mqcommand(orig, ui, repo, *args, **kwargs): |
|
3458 | 3458 | """Add --mq option to operate on patch repository instead of main""" |
|
3459 | 3459 | |
|
3460 | 3460 | # some commands do not like getting unknown options |
|
3461 | 3461 | mq = kwargs.pop('mq', None) |
|
3462 | 3462 | |
|
3463 | 3463 | if not mq: |
|
3464 | 3464 | return orig(ui, repo, *args, **kwargs) |
|
3465 | 3465 | |
|
3466 | 3466 | q = repo.mq |
|
3467 | 3467 | r = q.qrepo() |
|
3468 | 3468 | if not r: |
|
3469 | 3469 | raise util.Abort(_('no queue repository')) |
|
3470 | 3470 | return orig(r.ui, r, *args, **kwargs) |
|
3471 | 3471 | |
|
3472 | 3472 | def summary(orig, ui, repo, *args, **kwargs): |
|
3473 | 3473 | r = orig(ui, repo, *args, **kwargs) |
|
3474 | 3474 | q = repo.mq |
|
3475 | 3475 | m = [] |
|
3476 | 3476 | a, u = len(q.applied), len(q.unapplied(repo)) |
|
3477 | 3477 | if a: |
|
3478 | 3478 | m.append(ui.label(_("%d applied"), 'qseries.applied') % a) |
|
3479 | 3479 | if u: |
|
3480 | 3480 | m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u) |
|
3481 | 3481 | if m: |
|
3482 | 3482 | ui.write("mq: %s\n" % ', '.join(m)) |
|
3483 | 3483 | else: |
|
3484 | 3484 | ui.note(_("mq: (empty queue)\n")) |
|
3485 | 3485 | return r |
|
3486 | 3486 | |
|
3487 | 3487 | def revsetmq(repo, subset, x): |
|
3488 | 3488 | """``mq()`` |
|
3489 | 3489 | Changesets managed by MQ. |
|
3490 | 3490 | """ |
|
3491 | 3491 | revset.getargs(x, 0, 0, _("mq takes no arguments")) |
|
3492 | 3492 | applied = set([repo[r.node].rev() for r in repo.mq.applied]) |
|
3493 | 3493 | return [r for r in subset if r in applied] |
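# Example: the predicate registered in extsetup() below lets users select
# applied mq changesets from the command line:
#   $ hg log -r 'mq()'
#   $ hg log -r 'mq() and branch(default)'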
|
3494 | 3494 | |
|
3495 | 3495 | def extsetup(ui): |
|
3496 | 3496 | revset.symbols['mq'] = revsetmq |
|
3497 | 3497 | |
|
3498 | 3498 | # tell hggettext to extract docstrings from these functions: |
|
3499 | 3499 | i18nfunctions = [revsetmq] |
|
3500 | 3500 | |
|
3501 | 3501 | def uisetup(ui): |
|
3502 | 3502 | mqopt = [('', 'mq', None, _("operate on patch repository"))] |
|
3503 | 3503 | |
|
3504 | 3504 | extensions.wrapcommand(commands.table, 'import', mqimport) |
|
3505 | 3505 | extensions.wrapcommand(commands.table, 'summary', summary) |
|
3506 | 3506 | |
|
3507 | 3507 | entry = extensions.wrapcommand(commands.table, 'init', mqinit) |
|
3508 | 3508 | entry[1].extend(mqopt) |
|
3509 | 3509 | |
|
3510 | 3510 | nowrap = set(commands.norepo.split(" ")) |
|
3511 | 3511 | |
|
3512 | 3512 | def dotable(cmdtable): |
|
3513 | 3513 | for cmd in cmdtable.keys(): |
|
3514 | 3514 | cmd = cmdutil.parsealiases(cmd)[0] |
|
3515 | 3515 | if cmd in nowrap: |
|
3516 | 3516 | continue |
|
3517 | 3517 | entry = extensions.wrapcommand(cmdtable, cmd, mqcommand) |
|
3518 | 3518 | entry[1].extend(mqopt) |
|
3519 | 3519 | |
|
3520 | 3520 | dotable(commands.table) |
|
3521 | 3521 | |
|
3522 | 3522 | for extname, extmodule in extensions.extensions(): |
|
3523 | 3523 | if extmodule.__file__ != __file__: |
|
3524 | 3524 | dotable(getattr(extmodule, 'cmdtable', {})) |
|
3525 | 3525 | |
|
3526 | 3526 | |
|
3527 | 3527 | colortable = {'qguard.negative': 'red', |
|
3528 | 3528 | 'qguard.positive': 'yellow', |
|
3529 | 3529 | 'qguard.unguarded': 'green', |
|
3530 | 3530 | 'qseries.applied': 'blue bold underline', |
|
3531 | 3531 | 'qseries.guarded': 'black bold', |
|
3532 | 3532 | 'qseries.missing': 'red bold', |
|
3533 | 3533 | 'qseries.unapplied': 'black bold'} |
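# Example (sketch): the labels above can be recoloured from hgrc through
# the color extension, e.g. to render applied patches in green:
#   [color]
#   qseries.applied = green bold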
@@ -1,1653 +1,1653 | |||
|
1 | 1 | # cmdutil.py - help for command processing in mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from node import hex, nullid, nullrev, short |
|
9 | 9 | from i18n import _ |
|
10 | 10 | import os, sys, errno, re, tempfile |
|
11 | 11 | import util, scmutil, templater, patch, error, templatekw, revlog, copies |
|
12 | 12 | import match as matchmod |
|
13 | 13 | import subrepo, context, repair, bookmarks |
|
14 | 14 | |
|
15 | 15 | def parsealiases(cmd): |
|
16 | 16 | return cmd.lstrip("^").split("|") |
|
17 | 17 | |
|
18 | 18 | def findpossible(cmd, table, strict=False): |
|
19 | 19 | """ |
|
20 | 20 | Return cmd -> (aliases, command table entry) |
|
21 | 21 | for each matching command. |
|
22 | 22 | Return debug commands (or their aliases) only if no normal command matches. |
|
23 | 23 | """ |
|
24 | 24 | choice = {} |
|
25 | 25 | debugchoice = {} |
|
26 | 26 | |
|
27 | 27 | if cmd in table: |
|
28 | 28 | # short-circuit exact matches, "log" alias beats "^log|history" |
|
29 | 29 | keys = [cmd] |
|
30 | 30 | else: |
|
31 | 31 | keys = table.keys() |
|
32 | 32 | |
|
33 | 33 | for e in keys: |
|
34 | 34 | aliases = parsealiases(e) |
|
35 | 35 | found = None |
|
36 | 36 | if cmd in aliases: |
|
37 | 37 | found = cmd |
|
38 | 38 | elif not strict: |
|
39 | 39 | for a in aliases: |
|
40 | 40 | if a.startswith(cmd): |
|
41 | 41 | found = a |
|
42 | 42 | break |
|
43 | 43 | if found is not None: |
|
44 | 44 | if aliases[0].startswith("debug") or found.startswith("debug"): |
|
45 | 45 | debugchoice[found] = (aliases, table[e]) |
|
46 | 46 | else: |
|
47 | 47 | choice[found] = (aliases, table[e]) |
|
48 | 48 | |
|
49 | 49 | if not choice and debugchoice: |
|
50 | 50 | choice = debugchoice |
|
51 | 51 | |
|
52 | 52 | return choice |
|
53 | 53 | |
|
54 | 54 | def findcmd(cmd, table, strict=True): |
|
55 | 55 | """Return (aliases, command table entry) for command string.""" |
|
56 | 56 | choice = findpossible(cmd, table, strict) |
|
57 | 57 | |
|
58 | 58 | if cmd in choice: |
|
59 | 59 | return choice[cmd] |
|
60 | 60 | |
|
61 | 61 | if len(choice) > 1: |
|
62 | 62 | clist = choice.keys() |
|
63 | 63 | clist.sort() |
|
64 | 64 | raise error.AmbiguousCommand(cmd, clist) |
|
65 | 65 | |
|
66 | 66 | if choice: |
|
67 | 67 | return choice.values()[0] |
|
68 | 68 | |
|
69 | 69 | raise error.UnknownCommand(cmd) |
|
70 | 70 | |
|
71 | 71 | def findrepo(p): |
|
72 | 72 | while not os.path.isdir(os.path.join(p, ".hg")): |
|
73 | 73 | oldp, p = p, os.path.dirname(p) |
|
74 | 74 | if p == oldp: |
|
75 | 75 | return None |
|
76 | 76 | |
|
77 | 77 | return p |
|
78 | 78 | |
|
79 | 79 | def bailifchanged(repo): |
|
80 | 80 | if repo.dirstate.p2() != nullid: |
|
81 | 81 | raise util.Abort(_('outstanding uncommitted merge')) |
|
82 | 82 | modified, added, removed, deleted = repo.status()[:4] |
|
83 | 83 | if modified or added or removed or deleted: |
|
84 | 84 | raise util.Abort(_("outstanding uncommitted changes")) |
|
85 | 85 | ctx = repo[None] |
|
86 | 86 | for s in ctx.substate: |
|
87 | 87 | if ctx.sub(s).dirty(): |
|
88 | 88 | raise util.Abort(_("uncommitted changes in subrepo %s") % s) |
|
89 | 89 | |
|
90 | 90 | def logmessage(ui, opts): |
|
91 | 91 | """ get the log message according to -m and -l option """ |
|
92 | 92 | message = opts.get('message') |
|
93 | 93 | logfile = opts.get('logfile') |
|
94 | 94 | |
|
95 | 95 | if message and logfile: |
|
96 | 96 | raise util.Abort(_('options --message and --logfile are mutually ' |
|
97 | 97 | 'exclusive')) |
|
98 | 98 | if not message and logfile: |
|
99 | 99 | try: |
|
100 | 100 | if logfile == '-': |
|
101 | 101 | message = ui.fin.read() |
|
102 | 102 | else: |
|
103 | 103 | message = '\n'.join(util.readfile(logfile).splitlines()) |
|
104 | 104 | except IOError, inst: |
|
105 | 105 | raise util.Abort(_("can't read commit message '%s': %s") % |
|
106 | 106 | (logfile, inst.strerror)) |
|
107 | 107 | return message |
|
108 | 108 | |
|
109 | 109 | def loglimit(opts): |
|
110 | 110 | """get the log limit according to option -l/--limit""" |
|
111 | 111 | limit = opts.get('limit') |
|
112 | 112 | if limit: |
|
113 | 113 | try: |
|
114 | 114 | limit = int(limit) |
|
115 | 115 | except ValueError: |
|
116 | 116 | raise util.Abort(_('limit must be a positive integer')) |
|
117 | 117 | if limit <= 0: |
|
118 | 118 | raise util.Abort(_('limit must be positive')) |
|
119 | 119 | else: |
|
120 | 120 | limit = None |
|
121 | 121 | return limit |
|
122 | 122 | |
|
123 | 123 | def makefilename(repo, pat, node, desc=None, |
|
124 | 124 | total=None, seqno=None, revwidth=None, pathname=None): |
|
125 | 125 | node_expander = { |
|
126 | 126 | 'H': lambda: hex(node), |
|
127 | 127 | 'R': lambda: str(repo.changelog.rev(node)), |
|
128 | 128 | 'h': lambda: short(node), |
|
129 | 129 | 'm': lambda: re.sub('[^\w]', '_', str(desc)) |
|
130 | 130 | } |
|
131 | 131 | expander = { |
|
132 | 132 | '%': lambda: '%', |
|
133 | 133 | 'b': lambda: os.path.basename(repo.root), |
|
134 | 134 | } |
|
135 | 135 | |
|
136 | 136 | try: |
|
137 | 137 | if node: |
|
138 | 138 | expander.update(node_expander) |
|
139 | 139 | if node: |
|
140 | 140 | expander['r'] = (lambda: |
|
141 | 141 | str(repo.changelog.rev(node)).zfill(revwidth or 0)) |
|
142 | 142 | if total is not None: |
|
143 | 143 | expander['N'] = lambda: str(total) |
|
144 | 144 | if seqno is not None: |
|
145 | 145 | expander['n'] = lambda: str(seqno) |
|
146 | 146 | if total is not None and seqno is not None: |
|
147 | 147 | expander['n'] = lambda: str(seqno).zfill(len(str(total))) |
|
148 | 148 | if pathname is not None: |
|
149 | 149 | expander['s'] = lambda: os.path.basename(pathname) |
|
150 | 150 | expander['d'] = lambda: os.path.dirname(pathname) or '.' |
|
151 | 151 | expander['p'] = lambda: pathname |
|
152 | 152 | |
|
153 | 153 | newname = [] |
|
154 | 154 | patlen = len(pat) |
|
155 | 155 | i = 0 |
|
156 | 156 | while i < patlen: |
|
157 | 157 | c = pat[i] |
|
158 | 158 | if c == '%': |
|
159 | 159 | i += 1 |
|
160 | 160 | c = pat[i] |
|
161 | 161 | c = expander[c]() |
|
162 | 162 | newname.append(c) |
|
163 | 163 | i += 1 |
|
164 | 164 | return ''.join(newname) |
|
165 | 165 | except KeyError, inst: |
|
166 | 166 | raise util.Abort(_("invalid format spec '%%%s' in output filename") % |
|
167 | 167 | inst.args[0]) |
|
168 | 168 | |
|
169 | 169 | def makefileobj(repo, pat, node=None, desc=None, total=None, |
|
170 | 170 | seqno=None, revwidth=None, mode='wb', pathname=None): |
|
171 | 171 | |
|
172 | 172 | writable = mode not in ('r', 'rb') |
|
173 | 173 | |
|
174 | 174 | if not pat or pat == '-': |
|
175 | 175 | fp = writable and repo.ui.fout or repo.ui.fin |
|
176 | 176 | if util.safehasattr(fp, 'fileno'): |
|
177 | 177 | return os.fdopen(os.dup(fp.fileno()), mode) |
|
178 | 178 | else: |
|
179 | 179 | # if this fp can't be duped properly, return |
|
180 | 180 | # a dummy object that can be closed |
|
181 | 181 | class wrappedfileobj(object): |
|
182 | 182 | noop = lambda x: None |
|
183 | 183 | def __init__(self, f): |
|
184 | 184 | self.f = f |
|
185 | 185 | def __getattr__(self, attr): |
|
186 | 186 | if attr == 'close': |
|
187 | 187 | return self.noop |
|
188 | 188 | else: |
|
189 | 189 | return getattr(self.f, attr) |
|
190 | 190 | |
|
191 | 191 | return wrappedfileobj(fp) |
|
192 | 192 | if util.safehasattr(pat, 'write') and writable: |
|
193 | 193 | return pat |
|
194 | 194 | if util.safehasattr(pat, 'read') and 'r' in mode: |
|
195 | 195 | return pat |
|
196 | 196 | return open(makefilename(repo, pat, node, desc, total, seqno, revwidth, |
|
197 | 197 | pathname), |
|
198 | 198 | mode) |
|
199 | 199 | |
|
200 | 200 | def openrevlog(repo, cmd, file_, opts): |
|
201 | 201 | """opens the changelog, manifest, a filelog or a given revlog""" |
|
202 | 202 | cl = opts['changelog'] |
|
203 | 203 | mf = opts['manifest'] |
|
204 | 204 | msg = None |
|
205 | 205 | if cl and mf: |
|
206 | 206 | msg = _('cannot specify --changelog and --manifest at the same time') |
|
207 | 207 | elif cl or mf: |
|
208 | 208 | if file_: |
|
209 | 209 | msg = _('cannot specify filename with --changelog or --manifest') |
|
210 | 210 | elif not repo: |
|
211 | 211 | msg = _('cannot specify --changelog or --manifest ' |
|
212 | 212 | 'without a repository') |
|
213 | 213 | if msg: |
|
214 | 214 | raise util.Abort(msg) |
|
215 | 215 | |
|
216 | 216 | r = None |
|
217 | 217 | if repo: |
|
218 | 218 | if cl: |
|
219 | 219 | r = repo.changelog |
|
220 | 220 | elif mf: |
|
221 | 221 | r = repo.manifest |
|
222 | 222 | elif file_: |
|
223 | 223 | filelog = repo.file(file_) |
|
224 | 224 | if len(filelog): |
|
225 | 225 | r = filelog |
|
226 | 226 | if not r: |
|
227 | 227 | if not file_: |
|
228 | 228 | raise error.CommandError(cmd, _('invalid arguments')) |
|
229 | 229 | if not os.path.isfile(file_): |
|
230 | 230 | raise util.Abort(_("revlog '%s' not found") % file_) |
|
231 | 231 | r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), |
|
232 | 232 | file_[:-2] + ".i") |
|
233 | 233 | return r |
|
234 | 234 | |
|
235 | 235 | def copy(ui, repo, pats, opts, rename=False): |
|
236 | 236 | # called with the repo lock held |
|
237 | 237 | # |
|
238 | 238 | # hgsep => pathname that uses "/" to separate directories |
|
239 | 239 | # ossep => pathname that uses os.sep to separate directories |
|
240 | 240 | cwd = repo.getcwd() |
|
241 | 241 | targets = {} |
|
242 | 242 | after = opts.get("after") |
|
243 | 243 | dryrun = opts.get("dry_run") |
|
244 | 244 | wctx = repo[None] |
|
245 | 245 | |
|
246 | 246 | def walkpat(pat): |
|
247 | 247 | srcs = [] |
|
248 | 248 | badstates = after and '?' or '?r' |
|
249 | 249 | m = scmutil.match(repo[None], [pat], opts, globbed=True) |
|
250 | 250 | for abs in repo.walk(m): |
|
251 | 251 | state = repo.dirstate[abs] |
|
252 | 252 | rel = m.rel(abs) |
|
253 | 253 | exact = m.exact(abs) |
|
254 | 254 | if state in badstates: |
|
255 | 255 | if exact and state == '?': |
|
256 | 256 | ui.warn(_('%s: not copying - file is not managed\n') % rel) |
|
257 | 257 | if exact and state == 'r': |
|
258 | 258 | ui.warn(_('%s: not copying - file has been marked for' |
|
259 | 259 | ' remove\n') % rel) |
|
260 | 260 | continue |
|
261 | 261 | # abs: hgsep |
|
262 | 262 | # rel: ossep |
|
263 | 263 | srcs.append((abs, rel, exact)) |
|
264 | 264 | return srcs |
|
265 | 265 | |
|
266 | 266 | # abssrc: hgsep |
|
267 | 267 | # relsrc: ossep |
|
268 | 268 | # otarget: ossep |
|
269 | 269 | def copyfile(abssrc, relsrc, otarget, exact): |
|
270 | 270 | abstarget = scmutil.canonpath(repo.root, cwd, otarget) |
|
271 | 271 | if '/' in abstarget: |
|
272 | 272 | # We cannot normalize abstarget itself, this would prevent |
|
273 | 273 | # case only renames, like a => A. |
|
274 | 274 | abspath, absname = abstarget.rsplit('/', 1) |
|
275 | 275 | abstarget = repo.dirstate.normalize(abspath) + '/' + absname |
|
276 | 276 | reltarget = repo.pathto(abstarget, cwd) |
|
277 | 277 | target = repo.wjoin(abstarget) |
|
278 | 278 | src = repo.wjoin(abssrc) |
|
279 | 279 | state = repo.dirstate[abstarget] |
|
280 | 280 | |
|
281 | 281 | scmutil.checkportable(ui, abstarget) |
|
282 | 282 | |
|
283 | 283 | # check for collisions |
|
284 | 284 | prevsrc = targets.get(abstarget) |
|
285 | 285 | if prevsrc is not None: |
|
286 | 286 | ui.warn(_('%s: not overwriting - %s collides with %s\n') % |
|
287 | 287 | (reltarget, repo.pathto(abssrc, cwd), |
|
288 | 288 | repo.pathto(prevsrc, cwd))) |
|
289 | 289 | return |
|
290 | 290 | |
|
291 | 291 | # check for overwrites |
|
292 | 292 | exists = os.path.lexists(target) |
|
293 | 293 | samefile = False |
|
294 | 294 | if exists and abssrc != abstarget: |
|
295 | 295 | if (repo.dirstate.normalize(abssrc) == |
|
296 | 296 | repo.dirstate.normalize(abstarget)): |
|
297 | 297 | if not rename: |
|
298 | 298 | ui.warn(_("%s: can't copy - same file\n") % reltarget) |
|
299 | 299 | return |
|
300 | 300 | exists = False |
|
301 | 301 | samefile = True |
|
302 | 302 | |
|
303 | 303 | if not after and exists or after and state in 'mn': |
|
304 | 304 | if not opts['force']: |
|
305 | 305 | ui.warn(_('%s: not overwriting - file exists\n') % |
|
306 | 306 | reltarget) |
|
307 | 307 | return |
|
308 | 308 | |
|
309 | 309 | if after: |
|
310 | 310 | if not exists: |
|
311 | 311 | if rename: |
|
312 | 312 | ui.warn(_('%s: not recording move - %s does not exist\n') % |
|
313 | 313 | (relsrc, reltarget)) |
|
314 | 314 | else: |
|
315 | 315 | ui.warn(_('%s: not recording copy - %s does not exist\n') % |
|
316 | 316 | (relsrc, reltarget)) |
|
317 | 317 | return |
|
318 | 318 | elif not dryrun: |
|
319 | 319 | try: |
|
320 | 320 | if exists: |
|
321 | 321 | os.unlink(target) |
|
322 | 322 | targetdir = os.path.dirname(target) or '.' |
|
323 | 323 | if not os.path.isdir(targetdir): |
|
324 | 324 | os.makedirs(targetdir) |
|
325 | 325 | if samefile: |
|
326 | 326 | tmp = target + "~hgrename" |
|
327 | 327 | os.rename(src, tmp) |
|
328 | 328 | os.rename(tmp, target) |
|
329 | 329 | else: |
|
330 | 330 | util.copyfile(src, target) |
|
331 | 331 | srcexists = True |
|
332 | 332 | except IOError, inst: |
|
333 | 333 | if inst.errno == errno.ENOENT: |
|
334 | 334 | ui.warn(_('%s: deleted in working copy\n') % relsrc) |
|
335 | 335 | srcexists = False |
|
336 | 336 | else: |
|
337 | 337 | ui.warn(_('%s: cannot copy - %s\n') % |
|
338 | 338 | (relsrc, inst.strerror)) |
|
339 | 339 | return True # report a failure |
|
340 | 340 | |
|
341 | 341 | if ui.verbose or not exact: |
|
342 | 342 | if rename: |
|
343 | 343 | ui.status(_('moving %s to %s\n') % (relsrc, reltarget)) |
|
344 | 344 | else: |
|
345 | 345 | ui.status(_('copying %s to %s\n') % (relsrc, reltarget)) |
|
346 | 346 | |
|
347 | 347 | targets[abstarget] = abssrc |
|
348 | 348 | |
|
349 | 349 | # fix up dirstate |
|
350 | 350 | scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget, |
|
351 | 351 | dryrun=dryrun, cwd=cwd) |
|
352 | 352 | if rename and not dryrun: |
|
353 | 353 | if not after and srcexists and not samefile: |
|
354 | 354 | util.unlinkpath(repo.wjoin(abssrc)) |
|
355 | 355 | wctx.forget([abssrc]) |
|
356 | 356 | |
|
357 | 357 | # pat: ossep |
|
358 | 358 | # dest ossep |
|
359 | 359 | # srcs: list of (hgsep, hgsep, ossep, bool) |
|
360 | 360 | # return: function that takes hgsep and returns ossep |
|
361 | 361 | def targetpathfn(pat, dest, srcs): |
|
362 | 362 | if os.path.isdir(pat): |
|
363 | 363 | abspfx = scmutil.canonpath(repo.root, cwd, pat) |
|
364 | 364 | abspfx = util.localpath(abspfx) |
|
365 | 365 | if destdirexists: |
|
366 | 366 | striplen = len(os.path.split(abspfx)[0]) |
|
367 | 367 | else: |
|
368 | 368 | striplen = len(abspfx) |
|
369 | 369 | if striplen: |
|
370 | 370 | striplen += len(os.sep) |
|
371 | 371 | res = lambda p: os.path.join(dest, util.localpath(p)[striplen:]) |
|
372 | 372 | elif destdirexists: |
|
373 | 373 | res = lambda p: os.path.join(dest, |
|
374 | 374 | os.path.basename(util.localpath(p))) |
|
375 | 375 | else: |
|
376 | 376 | res = lambda p: dest |
|
377 | 377 | return res |
|
378 | 378 | |
|
379 | 379 | # pat: ossep |
|
380 | 380 | # dest ossep |
|
381 | 381 | # srcs: list of (hgsep, hgsep, ossep, bool) |
|
382 | 382 | # return: function that takes hgsep and returns ossep |
|
383 | 383 | def targetpathafterfn(pat, dest, srcs): |
|
384 | 384 | if matchmod.patkind(pat): |
|
385 | 385 | # a mercurial pattern |
|
386 | 386 | res = lambda p: os.path.join(dest, |
|
387 | 387 | os.path.basename(util.localpath(p))) |
|
388 | 388 | else: |
|
389 | 389 | abspfx = scmutil.canonpath(repo.root, cwd, pat) |
|
390 | 390 | if len(abspfx) < len(srcs[0][0]): |
|
391 | 391 | # A directory. Either the target path contains the last |
|
392 | 392 | # component of the source path or it does not. |
|
393 | 393 | def evalpath(striplen): |
|
394 | 394 | score = 0 |
|
395 | 395 | for s in srcs: |
|
396 | 396 | t = os.path.join(dest, util.localpath(s[0])[striplen:]) |
|
397 | 397 | if os.path.lexists(t): |
|
398 | 398 | score += 1 |
|
399 | 399 | return score |
|
400 | 400 | |
|
401 | 401 | abspfx = util.localpath(abspfx) |
|
402 | 402 | striplen = len(abspfx) |
|
403 | 403 | if striplen: |
|
404 | 404 | striplen += len(os.sep) |
|
405 | 405 | if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])): |
|
406 | 406 | score = evalpath(striplen) |
|
407 | 407 | striplen1 = len(os.path.split(abspfx)[0]) |
|
408 | 408 | if striplen1: |
|
409 | 409 | striplen1 += len(os.sep) |
|
410 | 410 | if evalpath(striplen1) > score: |
|
411 | 411 | striplen = striplen1 |
|
412 | 412 | res = lambda p: os.path.join(dest, |
|
413 | 413 | util.localpath(p)[striplen:]) |
|
414 | 414 | else: |
|
415 | 415 | # a file |
|
416 | 416 | if destdirexists: |
|
417 | 417 | res = lambda p: os.path.join(dest, |
|
418 | 418 | os.path.basename(util.localpath(p))) |
|
419 | 419 | else: |
|
420 | 420 | res = lambda p: dest |
|
421 | 421 | return res |
|
422 | 422 | |
|
423 | 423 | |
|
424 | 424 | pats = scmutil.expandpats(pats) |
|
425 | 425 | if not pats: |
|
426 | 426 | raise util.Abort(_('no source or destination specified')) |
|
427 | 427 | if len(pats) == 1: |
|
428 | 428 | raise util.Abort(_('no destination specified')) |
|
429 | 429 | dest = pats.pop() |
|
430 | 430 | destdirexists = os.path.isdir(dest) and not os.path.islink(dest) |
|
431 | 431 | if not destdirexists: |
|
432 | 432 | if len(pats) > 1 or matchmod.patkind(pats[0]): |
|
433 | 433 | raise util.Abort(_('with multiple sources, destination must be an ' |
|
434 | 434 | 'existing directory')) |
|
435 | 435 | if util.endswithsep(dest): |
|
436 | 436 | raise util.Abort(_('destination %s is not a directory') % dest) |
|
437 | 437 | |
|
438 | 438 | tfn = targetpathfn |
|
439 | 439 | if after: |
|
440 | 440 | tfn = targetpathafterfn |
|
441 | 441 | copylist = [] |
|
442 | 442 | for pat in pats: |
|
443 | 443 | srcs = walkpat(pat) |
|
444 | 444 | if not srcs: |
|
445 | 445 | continue |
|
446 | 446 | copylist.append((tfn(pat, dest, srcs), srcs)) |
|
447 | 447 | if not copylist: |
|
448 | 448 | raise util.Abort(_('no files to copy')) |
|
449 | 449 | |
|
450 | 450 | errors = 0 |
|
451 | 451 | for targetpath, srcs in copylist: |
|
452 | 452 | for abssrc, relsrc, exact in srcs: |
|
453 | 453 | if copyfile(abssrc, relsrc, targetpath(abssrc), exact): |
|
454 | 454 | errors += 1 |
|
455 | 455 | |
|
456 | 456 | if errors: |
|
457 | 457 | ui.warn(_('(consider using --after)\n')) |
|
458 | 458 | |
|
459 | 459 | return errors != 0 |
|
460 | 460 | |
|
461 | 461 | def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None, |
|
462 | 462 | runargs=None, appendpid=False): |
|
463 | 463 | '''Run a command as a service.''' |
|
464 | 464 | |
|
465 | 465 | if opts['daemon'] and not opts['daemon_pipefds']: |
|
466 | 466 | # Signal child process startup with file removal |
|
467 | 467 | lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-') |
|
468 | 468 | os.close(lockfd) |
|
469 | 469 | try: |
|
470 | 470 | if not runargs: |
|
471 | 471 | runargs = util.hgcmd() + sys.argv[1:] |
|
472 | 472 | runargs.append('--daemon-pipefds=%s' % lockpath) |
|
473 | 473 | # Don't pass --cwd to the child process, because we've already |
|
474 | 474 | # changed directory. |
|
475 | 475 | for i in xrange(1, len(runargs)): |
|
476 | 476 | if runargs[i].startswith('--cwd='): |
|
477 | 477 | del runargs[i] |
|
478 | 478 | break |
|
479 | 479 | elif runargs[i].startswith('--cwd'): |
|
480 | 480 | del runargs[i:i + 2] |
|
481 | 481 | break |
|
482 | 482 | def condfn(): |
|
483 | 483 | return not os.path.exists(lockpath) |
|
484 | 484 | pid = util.rundetached(runargs, condfn) |
|
485 | 485 | if pid < 0: |
|
486 | 486 | raise util.Abort(_('child process failed to start')) |
|
487 | 487 | finally: |
|
488 | 488 | try: |
|
489 | 489 | os.unlink(lockpath) |
|
490 | 490 | except OSError, e: |
|
491 | 491 | if e.errno != errno.ENOENT: |
|
492 | 492 | raise |
|
493 | 493 | if parentfn: |
|
494 | 494 | return parentfn(pid) |
|
495 | 495 | else: |
|
496 | 496 | return |
|
497 | 497 | |
|
498 | 498 | if initfn: |
|
499 | 499 | initfn() |
|
500 | 500 | |
|
501 | 501 | if opts['pid_file']: |
|
502 | 502 | mode = appendpid and 'a' or 'w' |
|
503 | 503 | fp = open(opts['pid_file'], mode) |
|
504 | 504 | fp.write(str(os.getpid()) + '\n') |
|
505 | 505 | fp.close() |
|
506 | 506 | |
|
507 | 507 | if opts['daemon_pipefds']: |
|
508 | 508 | lockpath = opts['daemon_pipefds'] |
|
509 | 509 | try: |
|
510 | 510 | os.setsid() |
|
511 | 511 | except AttributeError: |
|
512 | 512 | pass |
|
513 | 513 | os.unlink(lockpath) |
|
514 | 514 | util.hidewindow() |
|
515 | 515 | sys.stdout.flush() |
|
516 | 516 | sys.stderr.flush() |
|
517 | 517 | |
|
518 | 518 | nullfd = os.open(util.nulldev, os.O_RDWR) |
|
519 | 519 | logfilefd = nullfd |
|
520 | 520 | if logfile: |
|
521 | 521 | logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND) |
|
522 | 522 | os.dup2(nullfd, 0) |
|
523 | 523 | os.dup2(logfilefd, 1) |
|
524 | 524 | os.dup2(logfilefd, 2) |
|
525 | 525 | if nullfd not in (0, 1, 2): |
|
526 | 526 | os.close(nullfd) |
|
527 | 527 | if logfile and logfilefd not in (0, 1, 2): |
|
528 | 528 | os.close(logfilefd) |
|
529 | 529 | |
|
530 | 530 | if runfn: |
|
531 | 531 | return runfn() |
|
532 | 532 | |
|
533 | 533 | def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False, |
|
534 | 534 | opts=None): |
|
535 | 535 | '''export changesets as hg patches.''' |
|
536 | 536 | |
|
537 | 537 | total = len(revs) |
|
538 | 538 | revwidth = max([len(str(rev)) for rev in revs]) |
|
539 | 539 | |
|
540 | 540 | def single(rev, seqno, fp): |
|
541 | 541 | ctx = repo[rev] |
|
542 | 542 | node = ctx.node() |
|
543 | 543 | parents = [p.node() for p in ctx.parents() if p] |
|
544 | 544 | branch = ctx.branch() |
|
545 | 545 | if switch_parent: |
|
546 | 546 | parents.reverse() |
|
547 | 547 | prev = (parents and parents[0]) or nullid |
|
548 | 548 | |
|
549 | 549 | shouldclose = False |
|
550 | 550 | if not fp: |
|
551 | 551 | desc_lines = ctx.description().rstrip().split('\n') |
|
552 | 552 | desc = desc_lines[0] #Commit always has a first line. |
|
553 | 553 | fp = makefileobj(repo, template, node, desc=desc, total=total, |
|
554 | 554 | seqno=seqno, revwidth=revwidth, mode='ab') |
|
555 | 555 | if fp != template: |
|
556 | 556 | shouldclose = True |
|
557 | 557 | if fp != sys.stdout and util.safehasattr(fp, 'name'): |
|
558 | 558 | repo.ui.note("%s\n" % fp.name) |
|
559 | 559 | |
|
560 | 560 | fp.write("# HG changeset patch\n") |
|
561 | 561 | fp.write("# User %s\n" % ctx.user()) |
|
562 | 562 | fp.write("# Date %d %d\n" % ctx.date()) |
|
563 | 563 | if branch and branch != 'default': |
|
564 | 564 | fp.write("# Branch %s\n" % branch) |
|
565 | 565 | fp.write("# Node ID %s\n" % hex(node)) |
|
566 | 566 | fp.write("# Parent %s\n" % hex(prev)) |
|
567 | 567 | if len(parents) > 1: |
|
568 | 568 | fp.write("# Parent %s\n" % hex(parents[1])) |
|
569 | 569 | fp.write(ctx.description().rstrip()) |
|
570 | 570 | fp.write("\n\n") |
|
571 | 571 | |
|
572 | 572 | for chunk in patch.diff(repo, prev, node, opts=opts): |
|
573 | 573 | fp.write(chunk) |
|
574 | 574 | |
|
575 | 575 | if shouldclose: |
|
576 | 576 | fp.close() |
|
577 | 577 | |
|
578 | 578 | for seqno, rev in enumerate(revs): |
|
579 | 579 | single(rev, seqno + 1, fp) |
|
580 | 580 | |
|
581 | 581 | def diffordiffstat(ui, repo, diffopts, node1, node2, match, |
|
582 | 582 | changes=None, stat=False, fp=None, prefix='', |
|
583 | 583 | listsubrepos=False): |
|
584 | 584 | '''show diff or diffstat.''' |
|
585 | 585 | if fp is None: |
|
586 | 586 | write = ui.write |
|
587 | 587 | else: |
|
588 | 588 | def write(s, **kw): |
|
589 | 589 | fp.write(s) |
|
590 | 590 | |
|
591 | 591 | if stat: |
|
592 | 592 | diffopts = diffopts.copy(context=0) |
|
593 | 593 | width = 80 |
|
594 | 594 | if not ui.plain(): |
|
595 | 595 | width = ui.termwidth() |
|
596 | 596 | chunks = patch.diff(repo, node1, node2, match, changes, diffopts, |
|
597 | 597 | prefix=prefix) |
|
598 | 598 | for chunk, label in patch.diffstatui(util.iterlines(chunks), |
|
599 | 599 | width=width, |
|
600 | 600 | git=diffopts.git): |
|
601 | 601 | write(chunk, label=label) |
|
602 | 602 | else: |
|
603 | 603 | for chunk, label in patch.diffui(repo, node1, node2, match, |
|
604 | 604 | changes, diffopts, prefix=prefix): |
|
605 | 605 | write(chunk, label=label) |
|
606 | 606 | |
|
607 | 607 | if listsubrepos: |
|
608 | 608 | ctx1 = repo[node1] |
|
609 | 609 | ctx2 = repo[node2] |
|
610 | 610 | for subpath, sub in subrepo.itersubrepos(ctx1, ctx2): |
|
611 | 611 | tempnode2 = node2 |
|
612 | 612 | try: |
|
613 | 613 | if node2 is not None: |
|
614 | 614 | tempnode2 = ctx2.substate[subpath][1] |
|
615 | 615 | except KeyError: |
|
616 | 616 | # A subrepo that existed in node1 was deleted between node1 and |
|
617 | 617 | # node2 (inclusive). Thus, ctx2's substate won't contain that |
|
618 | 618 | # subpath. The best we can do is to ignore it. |
|
619 | 619 | tempnode2 = None |
|
620 | 620 | submatch = matchmod.narrowmatcher(subpath, match) |
|
621 | 621 | sub.diff(diffopts, tempnode2, submatch, changes=changes, |
|
622 | 622 | stat=stat, fp=fp, prefix=prefix) |
|
623 | 623 | |
|
624 | 624 | class changeset_printer(object): |
|
625 | 625 | '''show changeset information when templating not requested.''' |
|
626 | 626 | |
|
627 | 627 | def __init__(self, ui, repo, patch, diffopts, buffered): |
|
628 | 628 | self.ui = ui |
|
629 | 629 | self.repo = repo |
|
630 | 630 | self.buffered = buffered |
|
631 | 631 | self.patch = patch |
|
632 | 632 | self.diffopts = diffopts |
|
633 | 633 | self.header = {} |
|
634 | 634 | self.hunk = {} |
|
635 | 635 | self.lastheader = None |
|
636 | 636 | self.footer = None |
|
637 | 637 | |
|
638 | 638 | def flush(self, rev): |
|
639 | 639 | if rev in self.header: |
|
640 | 640 | h = self.header[rev] |
|
641 | 641 | if h != self.lastheader: |
|
642 | 642 | self.lastheader = h |
|
643 | 643 | self.ui.write(h) |
|
644 | 644 | del self.header[rev] |
|
645 | 645 | if rev in self.hunk: |
|
646 | 646 | self.ui.write(self.hunk[rev]) |
|
647 | 647 | del self.hunk[rev] |
|
648 | 648 | return 1 |
|
649 | 649 | return 0 |
|
650 | 650 | |
|
651 | 651 | def close(self): |
|
652 | 652 | if self.footer: |
|
653 | 653 | self.ui.write(self.footer) |
|
654 | 654 | |
|
655 | 655 | def show(self, ctx, copies=None, matchfn=None, **props): |
|
656 | 656 | if self.buffered: |
|
657 | 657 | self.ui.pushbuffer() |
|
658 | 658 | self._show(ctx, copies, matchfn, props) |
|
659 | 659 | self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True) |
|
660 | 660 | else: |
|
661 | 661 | self._show(ctx, copies, matchfn, props) |
|
662 | 662 | |
|
663 | 663 | def _show(self, ctx, copies, matchfn, props): |
|
664 | 664 | '''show a single changeset or file revision''' |
|
665 | 665 | changenode = ctx.node() |
|
666 | 666 | rev = ctx.rev() |
|
667 | 667 | |
|
668 | 668 | if self.ui.quiet: |
|
669 | 669 | self.ui.write("%d:%s\n" % (rev, short(changenode)), |
|
670 | 670 | label='log.node') |
|
671 | 671 | return |
|
672 | 672 | |
|
673 | 673 | log = self.repo.changelog |
|
674 | 674 | date = util.datestr(ctx.date()) |
|
675 | 675 | |
|
676 | 676 | hexfunc = self.ui.debugflag and hex or short |
|
677 | 677 | |
|
678 | 678 | parents = [(p, hexfunc(log.node(p))) |
|
679 | 679 | for p in self._meaningful_parentrevs(log, rev)] |
|
680 | 680 | |
|
681 | 681 | self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)), |
|
682 | 682 | label='log.changeset') |
|
683 | 683 | |
|
684 | 684 | branch = ctx.branch() |
|
685 | 685 | # don't show the default branch name |
|
686 | 686 | if branch != 'default': |
|
687 | 687 | self.ui.write(_("branch: %s\n") % branch, |
|
688 | 688 | label='log.branch') |
|
689 | 689 | for bookmark in self.repo.nodebookmarks(changenode): |
|
690 | 690 | self.ui.write(_("bookmark: %s\n") % bookmark, |
|
691 | 691 | label='log.bookmark') |
|
692 | 692 | for tag in self.repo.nodetags(changenode): |
|
693 | 693 | self.ui.write(_("tag: %s\n") % tag, |
|
694 | 694 | label='log.tag') |
|
695 | 695 | if self.ui.debugflag and ctx.phase(): |
|
696 | 696 | self.ui.write(_("phase: %s\n") % _(ctx.phasestr()), |
|
697 | 697 | label='log.phase') |
|
698 | 698 | for parent in parents: |
|
699 | 699 | self.ui.write(_("parent: %d:%s\n") % parent, |
|
700 | 700 | label='log.parent') |
|
701 | 701 | |
|
702 | 702 | if self.ui.debugflag: |
|
703 | 703 | mnode = ctx.manifestnode() |
|
704 | 704 | self.ui.write(_("manifest: %d:%s\n") % |
|
705 | 705 | (self.repo.manifest.rev(mnode), hex(mnode)), |
|
706 | 706 | label='ui.debug log.manifest') |
|
707 | 707 | self.ui.write(_("user: %s\n") % ctx.user(), |
|
708 | 708 | label='log.user') |
|
709 | 709 | self.ui.write(_("date: %s\n") % date, |
|
710 | 710 | label='log.date') |
|
711 | 711 | |
|
712 | 712 | if self.ui.debugflag: |
|
713 | 713 | files = self.repo.status(log.parents(changenode)[0], changenode)[:3] |
|
714 | 714 | for key, value in zip([_("files:"), _("files+:"), _("files-:")], |
|
715 | 715 | files): |
|
716 | 716 | if value: |
|
717 | 717 | self.ui.write("%-12s %s\n" % (key, " ".join(value)), |
|
718 | 718 | label='ui.debug log.files') |
|
719 | 719 | elif ctx.files() and self.ui.verbose: |
|
720 | 720 | self.ui.write(_("files: %s\n") % " ".join(ctx.files()), |
|
721 | 721 | label='ui.note log.files') |
|
722 | 722 | if copies and self.ui.verbose: |
|
723 | 723 | copies = ['%s (%s)' % c for c in copies] |
|
724 | 724 | self.ui.write(_("copies: %s\n") % ' '.join(copies), |
|
725 | 725 | label='ui.note log.copies') |
|
726 | 726 | |
|
727 | 727 | extra = ctx.extra() |
|
728 | 728 | if extra and self.ui.debugflag: |
|
729 | 729 | for key, value in sorted(extra.items()): |
|
730 | 730 | self.ui.write(_("extra: %s=%s\n") |
|
731 | 731 | % (key, value.encode('string_escape')), |
|
732 | 732 | label='ui.debug log.extra') |
|
733 | 733 | |
|
734 | 734 | description = ctx.description().strip() |
|
735 | 735 | if description: |
|
736 | 736 | if self.ui.verbose: |
|
737 | 737 | self.ui.write(_("description:\n"), |
|
738 | 738 | label='ui.note log.description') |
|
739 | 739 | self.ui.write(description, |
|
740 | 740 | label='ui.note log.description') |
|
741 | 741 | self.ui.write("\n\n") |
|
742 | 742 | else: |
|
743 | 743 | self.ui.write(_("summary: %s\n") % |
|
744 | 744 | description.splitlines()[0], |
|
745 | 745 | label='log.summary') |
|
746 | 746 | self.ui.write("\n") |
|
747 | 747 | |
|
748 | 748 | self.showpatch(changenode, matchfn) |
|
749 | 749 | |
|
750 | 750 | def showpatch(self, node, matchfn): |
|
751 | 751 | if not matchfn: |
|
752 | 752 | matchfn = self.patch |
|
753 | 753 | if matchfn: |
|
754 | 754 | stat = self.diffopts.get('stat') |
|
755 | 755 | diff = self.diffopts.get('patch') |
|
756 | 756 | diffopts = patch.diffopts(self.ui, self.diffopts) |
|
757 | 757 | prev = self.repo.changelog.parents(node)[0] |
|
758 | 758 | if stat: |
|
759 | 759 | diffordiffstat(self.ui, self.repo, diffopts, prev, node, |
|
760 | 760 | match=matchfn, stat=True) |
|
761 | 761 | if diff: |
|
762 | 762 | if stat: |
|
763 | 763 | self.ui.write("\n") |
|
764 | 764 | diffordiffstat(self.ui, self.repo, diffopts, prev, node, |
|
765 | 765 | match=matchfn, stat=False) |
|
766 | 766 | self.ui.write("\n") |
|
767 | 767 | |
|
768 | 768 | def _meaningful_parentrevs(self, log, rev): |
|
769 | 769 | """Return list of meaningful (or all if debug) parentrevs for rev. |
|
770 | 770 | |
|
771 | 771 | For merges (two non-nullrev revisions) both parents are meaningful. |
|
772 | 772 | Otherwise the first parent revision is considered meaningful if it |
|
773 | 773 | is not the preceding revision. |
|
774 | 774 | """ |
|
775 | 775 | parents = log.parentrevs(rev) |
|
776 | 776 | if not self.ui.debugflag and parents[1] == nullrev: |
|
777 | 777 | if parents[0] >= rev - 1: |
|
778 | 778 | parents = [] |
|
779 | 779 | else: |
|
780 | 780 | parents = [parents[0]] |
|
781 | 781 | return parents |
|
782 | 782 | |
|
783 | 783 | |
|
784 | 784 | class changeset_templater(changeset_printer): |
|
785 | 785 | '''format changeset information.''' |
|
786 | 786 | |
|
787 | 787 | def __init__(self, ui, repo, patch, diffopts, mapfile, buffered): |
|
788 | 788 | changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered) |
|
789 | 789 | formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12]) |
|
790 | 790 | defaulttempl = { |
|
791 | 791 | 'parent': '{rev}:{node|formatnode} ', |
|
792 | 792 | 'manifest': '{rev}:{node|formatnode}', |
|
793 | 793 | 'file_copy': '{name} ({source})', |
|
794 | 794 | 'extra': '{key}={value|stringescape}' |
|
795 | 795 | } |
|
796 | 796 | # filecopy is preserved for compatibility reasons |
|
797 | 797 | defaulttempl['filecopy'] = defaulttempl['file_copy'] |
|
798 | 798 | self.t = templater.templater(mapfile, {'formatnode': formatnode}, |
|
799 | 799 | cache=defaulttempl) |
|
800 | 800 | self.cache = {} |
|
801 | 801 | |
|
802 | 802 | def use_template(self, t): |
|
803 | 803 | '''set template string to use''' |
|
804 | 804 | self.t.cache['changeset'] = t |
|
805 | 805 | |
|
806 | 806 | def _meaningful_parentrevs(self, ctx): |
|
807 | 807 | """Return list of meaningful (or all if debug) parentrevs for rev. |
|
808 | 808 | """ |
|
809 | 809 | parents = ctx.parents() |
|
810 | 810 | if len(parents) > 1: |
|
811 | 811 | return parents |
|
812 | 812 | if self.ui.debugflag: |
|
813 | 813 | return [parents[0], self.repo['null']] |
|
814 | 814 | if parents[0].rev() >= ctx.rev() - 1: |
|
815 | 815 | return [] |
|
816 | 816 | return parents |
|
817 | 817 | |
|
818 | 818 | def _show(self, ctx, copies, matchfn, props): |
|
819 | 819 | '''show a single changeset or file revision''' |
|
820 | 820 | |
|
821 | 821 | showlist = templatekw.showlist |
|
822 | 822 | |
|
823 | 823 | # showparents() behaviour depends on ui trace level which |
|
824 | 824 | # causes unexpected behaviours at templating level and makes |
|
825 | 825 | # it harder to extract it in a standalone function. Its |
|
826 | 826 | # behaviour cannot be changed so leave it here for now. |
|
827 | 827 | def showparents(**args): |
|
828 | 828 | ctx = args['ctx'] |
|
829 | 829 | parents = [[('rev', p.rev()), ('node', p.hex())] |
|
830 | 830 | for p in self._meaningful_parentrevs(ctx)] |
|
831 | 831 | return showlist('parent', parents, **args) |
|
832 | 832 | |
|
833 | 833 | props = props.copy() |
|
834 | 834 | props.update(templatekw.keywords) |
|
835 | 835 | props['parents'] = showparents |
|
836 | 836 | props['templ'] = self.t |
|
837 | 837 | props['ctx'] = ctx |
|
838 | 838 | props['repo'] = self.repo |
|
839 | 839 | props['revcache'] = {'copies': copies} |
|
840 | 840 | props['cache'] = self.cache |
|
841 | 841 | |
|
842 | 842 | # find correct templates for current mode |
|
843 | 843 | |
|
844 | 844 | tmplmodes = [ |
|
845 | 845 | (True, None), |
|
846 | 846 | (self.ui.verbose, 'verbose'), |
|
847 | 847 | (self.ui.quiet, 'quiet'), |
|
848 | 848 | (self.ui.debugflag, 'debug'), |
|
849 | 849 | ] |
|
850 | 850 | |
|
851 | 851 | types = {'header': '', 'footer':'', 'changeset': 'changeset'} |
|
852 | 852 | for mode, postfix in tmplmodes: |
|
853 | 853 | for type in types: |
|
854 | 854 | cur = postfix and ('%s_%s' % (type, postfix)) or type |
|
855 | 855 | if mode and cur in self.t: |
|
856 | 856 | types[type] = cur |
|
857 | 857 | |
|
858 | 858 | try: |
|
859 | 859 | |
|
860 | 860 | # write header |
|
861 | 861 | if types['header']: |
|
862 | 862 | h = templater.stringify(self.t(types['header'], **props)) |
|
863 | 863 | if self.buffered: |
|
864 | 864 | self.header[ctx.rev()] = h |
|
865 | 865 | else: |
|
866 | 866 | if self.lastheader != h: |
|
867 | 867 | self.lastheader = h |
|
868 | 868 | self.ui.write(h) |
|
869 | 869 | |
|
870 | 870 | # write changeset metadata, then patch if requested |
|
871 | 871 | key = types['changeset'] |
|
872 | 872 | self.ui.write(templater.stringify(self.t(key, **props))) |
|
873 | 873 | self.showpatch(ctx.node(), matchfn) |
|
874 | 874 | |
|
875 | 875 | if types['footer']: |
|
876 | 876 | if not self.footer: |
|
877 | 877 | self.footer = templater.stringify(self.t(types['footer'], |
|
878 | 878 | **props)) |
|
879 | 879 | |
|
880 | 880 | except KeyError, inst: |
|
881 | 881 | msg = _("%s: no key named '%s'") |
|
882 | 882 | raise util.Abort(msg % (self.t.mapfile, inst.args[0])) |
|
883 | 883 | except SyntaxError, inst: |
|
884 | 884 | raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0])) |
|
885 | 885 | |
|
886 | 886 | def show_changeset(ui, repo, opts, buffered=False): |
|
887 | 887 | """show one changeset using template or regular display. |
|
888 | 888 | |
|
889 | 889 | Display format will be the first non-empty hit of: |
|
890 | 890 | 1. option 'template' |
|
891 | 891 | 2. option 'style' |
|
892 | 892 | 3. [ui] setting 'logtemplate' |
|
893 | 893 | 4. [ui] setting 'style' |
|
894 | 894 | If all of these values are either the unset or the empty string, |
|
895 | 895 | regular display via changeset_printer() is done. |
|
896 | 896 | """ |
|
897 | 897 | # options |
|
898 | 898 | patch = False |
|
899 | 899 | if opts.get('patch') or opts.get('stat'): |
|
900 | 900 | patch = scmutil.matchall(repo) |
|
901 | 901 | |
|
902 | 902 | tmpl = opts.get('template') |
|
903 | 903 | style = None |
|
904 | 904 | if tmpl: |
|
905 | 905 | tmpl = templater.parsestring(tmpl, quoted=False) |
|
906 | 906 | else: |
|
907 | 907 | style = opts.get('style') |
|
908 | 908 | |
|
909 | 909 | # ui settings |
|
910 | 910 | if not (tmpl or style): |
|
911 | 911 | tmpl = ui.config('ui', 'logtemplate') |
|
912 | 912 | if tmpl: |
|
913 | 913 | tmpl = templater.parsestring(tmpl) |
|
914 | 914 | else: |
|
915 | 915 | style = util.expandpath(ui.config('ui', 'style', '')) |
|
916 | 916 | |
|
917 | 917 | if not (tmpl or style): |
|
918 | 918 | return changeset_printer(ui, repo, patch, opts, buffered) |
|
919 | 919 | |
|
920 | 920 | mapfile = None |
|
921 | 921 | if style and not tmpl: |
|
922 | 922 | mapfile = style |
|
923 | 923 | if not os.path.split(mapfile)[0]: |
|
924 | 924 | mapname = (templater.templatepath('map-cmdline.' + mapfile) |
|
925 | 925 | or templater.templatepath(mapfile)) |
|
926 | 926 | if mapname: |
|
927 | 927 | mapfile = mapname |
|
928 | 928 | |
|
929 | 929 | try: |
|
930 | 930 | t = changeset_templater(ui, repo, patch, opts, mapfile, buffered) |
|
931 | 931 | except SyntaxError, inst: |
|
932 | 932 | raise util.Abort(inst.args[0]) |
|
933 | 933 | if tmpl: |
|
934 | 934 | t.use_template(tmpl) |
|
935 | 935 | return t |
|
936 | 936 | |
|
937 | 937 | def finddate(ui, repo, date): |
|
938 | 938 | """Find the tipmost changeset that matches the given date spec""" |
|
939 | 939 | |
|
940 | 940 | df = util.matchdate(date) |
|
941 | 941 | m = scmutil.matchall(repo) |
|
942 | 942 | results = {} |
|
943 | 943 | |
|
944 | 944 | def prep(ctx, fns): |
|
945 | 945 | d = ctx.date() |
|
946 | 946 | if df(d[0]): |
|
947 | 947 | results[ctx.rev()] = d |
|
948 | 948 | |
|
949 | 949 | for ctx in walkchangerevs(repo, m, {'rev': None}, prep): |
|
950 | 950 | rev = ctx.rev() |
|
951 | 951 | if rev in results: |
|
952 | 952 | ui.status(_("Found revision %s from %s\n") % |
|
953 | 953 | (rev, util.datestr(results[rev]))) |
|
954 | 954 | return str(rev) |
|
955 | 955 | |
|
956 | 956 | raise util.Abort(_("revision matching date not found")) |
|
957 | 957 | |
|
958 | 958 | def walkchangerevs(repo, match, opts, prepare): |
|
959 | 959 | '''Iterate over files and the revs in which they changed. |
|
960 | 960 | |
|
961 | 961 | Callers most commonly need to iterate backwards over the history |
|
962 | 962 | in which they are interested. Doing so has awful (quadratic-looking) |
|
963 | 963 | performance, so we use iterators in a "windowed" way. |
|
964 | 964 | |
|
965 | 965 | We walk a window of revisions in the desired order. Within the |
|
966 | 966 | window, we first walk forwards to gather data, then in the desired |
|
967 | 967 | order (usually backwards) to display it. |
|
968 | 968 | |
|
969 | 969 | This function returns an iterator yielding contexts. Before |
|
970 | 970 | yielding each context, the iterator will first call the prepare |
|
971 | 971 | function on each context in the window in forward order.''' |
|
972 | 972 | |
|
973 | 973 | def increasing_windows(start, end, windowsize=8, sizelimit=512): |
|
974 | 974 | if start < end: |
|
975 | 975 | while start < end: |
|
976 | 976 | yield start, min(windowsize, end - start) |
|
977 | 977 | start += windowsize |
|
978 | 978 | if windowsize < sizelimit: |
|
979 | 979 | windowsize *= 2 |
|
980 | 980 | else: |
|
981 | 981 | while start > end: |
|
982 | 982 | yield start, min(windowsize, start - end - 1) |
|
983 | 983 | start -= windowsize |
|
984 | 984 | if windowsize < sizelimit: |
|
985 | 985 | windowsize *= 2 |
|
986 | 986 | |
|
987 | 987 | follow = opts.get('follow') or opts.get('follow_first') |
|
988 | 988 | |
|
989 | 989 | if not len(repo): |
|
990 | 990 | return [] |
|
991 | 991 | |
|
992 | 992 | if follow: |
|
993 | 993 | defrange = '%s:0' % repo['.'].rev() |
|
994 | 994 | else: |
|
995 | 995 | defrange = '-1:0' |
|
996 | 996 | revs = scmutil.revrange(repo, opts['rev'] or [defrange]) |
|
997 | 997 | if not revs: |
|
998 | 998 | return [] |
|
999 | 999 | wanted = set() |
|
1000 | 1000 | slowpath = match.anypats() or (match.files() and opts.get('removed')) |
|
1001 | 1001 | fncache = {} |
|
1002 | 1002 | change = repo.changectx |
|
1003 | 1003 | |
|
1004 | 1004 | # First step is to fill wanted, the set of revisions that we want to yield. |
|
1005 | 1005 | # When it does not induce extra cost, we also fill fncache for revisions in |
|
1006 | 1006 | # wanted: a cache of filenames that were changed (ctx.files()) and that |
|
1007 | 1007 | # match the file filtering conditions. |
|
1008 | 1008 | |
|
1009 | 1009 | if not slowpath and not match.files(): |
|
1010 | 1010 | # No files, no patterns. Display all revs. |
|
1011 | 1011 | wanted = set(revs) |
|
1012 | 1012 | copies = [] |
|
1013 | 1013 | |
|
1014 | 1014 | if not slowpath and match.files(): |
|
1015 | 1015 | # We only have to read through the filelog to find wanted revisions |
|
1016 | 1016 | |
|
1017 | 1017 | minrev, maxrev = min(revs), max(revs) |
|
1018 | 1018 | def filerevgen(filelog, last): |
|
1019 | 1019 | """ |
|
1020 | 1020 | Only files, no patterns. Check the history of each file. |
|
1021 | 1021 | |
|
1022 | 1022 | Examines filelog entries within minrev, maxrev linkrev range |
|
1023 | 1023 | Returns an iterator yielding (linkrev, parentlinkrevs, copied) |
|
1024 | 1024 | tuples in backwards order |
|
1025 | 1025 | """ |
|
1026 | 1026 | cl_count = len(repo) |
|
1027 | 1027 | revs = [] |
|
1028 | 1028 | for j in xrange(0, last + 1): |
|
1029 | 1029 | linkrev = filelog.linkrev(j) |
|
1030 | 1030 | if linkrev < minrev: |
|
1031 | 1031 | continue |
|
1032 | 1032 | # only yield rev for which we have the changelog, it can |
|
1033 | 1033 | # happen while doing "hg log" during a pull or commit |
|
1034 | 1034 | if linkrev >= cl_count: |
|
1035 | 1035 | break |
|
1036 | 1036 | |
|
1037 | 1037 | parentlinkrevs = [] |
|
1038 | 1038 | for p in filelog.parentrevs(j): |
|
1039 | 1039 | if p != nullrev: |
|
1040 | 1040 | parentlinkrevs.append(filelog.linkrev(p)) |
|
1041 | 1041 | n = filelog.node(j) |
|
1042 | 1042 | revs.append((linkrev, parentlinkrevs, |
|
1043 | 1043 | follow and filelog.renamed(n))) |
|
1044 | 1044 | |
|
1045 | 1045 | return reversed(revs) |
|
1046 | 1046 | def iterfiles(): |
|
1047 | 1047 | pctx = repo['.'] |
|
1048 | 1048 | for filename in match.files(): |
|
1049 | 1049 | if follow: |
|
1050 | 1050 | if filename not in pctx: |
|
1051 | 1051 | raise util.Abort(_('cannot follow file not in parent ' |
|
1052 | 1052 | 'revision: "%s"') % filename) |
|
1053 | 1053 | yield filename, pctx[filename].filenode() |
|
1054 | 1054 | else: |
|
1055 | 1055 | yield filename, None |
|
1056 | 1056 | for filename_node in copies: |
|
1057 | 1057 | yield filename_node |
|
1058 | 1058 | for file_, node in iterfiles(): |
|
1059 | 1059 | filelog = repo.file(file_) |
|
1060 | 1060 | if not len(filelog): |
|
1061 | 1061 | if node is None: |
|
1062 | 1062 | # A zero count may be a directory or deleted file, so |
|
1063 | 1063 | # try to find matching entries on the slow path. |
|
1064 | 1064 | if follow: |
|
1065 | 1065 | raise util.Abort( |
|
1066 | 1066 | _('cannot follow nonexistent file: "%s"') % file_) |
|
1067 | 1067 | slowpath = True |
|
1068 | 1068 | break |
|
1069 | 1069 | else: |
|
1070 | 1070 | continue |
|
1071 | 1071 | |
|
1072 | 1072 | if node is None: |
|
1073 | 1073 | last = len(filelog) - 1 |
|
1074 | 1074 | else: |
|
1075 | 1075 | last = filelog.rev(node) |
|
1076 | 1076 | |
|
1077 | 1077 | |
|
1078 | 1078 | # keep track of all ancestors of the file |
|
1079 | 1079 | ancestors = set([filelog.linkrev(last)]) |
|
1080 | 1080 | |
|
1081 | 1081 | # iterate from latest to oldest revision |
|
1082 | 1082 | for rev, flparentlinkrevs, copied in filerevgen(filelog, last): |
|
1083 | 1083 | if not follow: |
|
1084 | 1084 | if rev > maxrev: |
|
1085 | 1085 | continue |
|
1086 | 1086 | else: |
|
1087 | 1087 | # Note that last might not be the first interesting |
|
1088 | 1088 | # rev to us: |
|
1089 | 1089 | # if the file has been changed after maxrev, we'll |
|
1090 | 1090 | # have linkrev(last) > maxrev, and we still need |
|
1091 | 1091 | # to explore the file graph |
|
1092 | 1092 | if rev not in ancestors: |
|
1093 | 1093 | continue |
|
1094 | 1094 | # XXX insert 1327 fix here |
|
1095 | 1095 | if flparentlinkrevs: |
|
1096 | 1096 | ancestors.update(flparentlinkrevs) |
|
1097 | 1097 | |
|
1098 | 1098 | fncache.setdefault(rev, []).append(file_) |
|
1099 | 1099 | wanted.add(rev) |
|
1100 | 1100 | if copied: |
|
1101 | 1101 | copies.append(copied) |
|
1102 | 1102 | if slowpath: |
|
1103 | 1103 | # We have to read the changelog to match filenames against |
|
1104 | 1104 | # changed files |
|
1105 | 1105 | |
|
1106 | 1106 | if follow: |
|
1107 | 1107 | raise util.Abort(_('can only follow copies/renames for explicit ' |
|
1108 | 1108 | 'filenames')) |
|
1109 | 1109 | |
|
1110 | 1110 | # The slow path checks files modified in every changeset. |
|
1111 | 1111 | for i in sorted(revs): |
|
1112 | 1112 | ctx = change(i) |
|
1113 | 1113 | matches = filter(match, ctx.files()) |
|
1114 | 1114 | if matches: |
|
1115 | 1115 | fncache[i] = matches |
|
1116 | 1116 | wanted.add(i) |
|
1117 | 1117 | |
|
1118 | 1118 | class followfilter(object): |
|
1119 | 1119 | def __init__(self, onlyfirst=False): |
|
1120 | 1120 | self.startrev = nullrev |
|
1121 | 1121 | self.roots = set() |
|
1122 | 1122 | self.onlyfirst = onlyfirst |
|
1123 | 1123 | |
|
1124 | 1124 | def match(self, rev): |
|
1125 | 1125 | def realparents(rev): |
|
1126 | 1126 | if self.onlyfirst: |
|
1127 | 1127 | return repo.changelog.parentrevs(rev)[0:1] |
|
1128 | 1128 | else: |
|
1129 | 1129 | return filter(lambda x: x != nullrev, |
|
1130 | 1130 | repo.changelog.parentrevs(rev)) |
|
1131 | 1131 | |
|
1132 | 1132 | if self.startrev == nullrev: |
|
1133 | 1133 | self.startrev = rev |
|
1134 | 1134 | return True |
|
1135 | 1135 | |
|
1136 | 1136 | if rev > self.startrev: |
|
1137 | 1137 | # forward: all descendants |
|
1138 | 1138 | if not self.roots: |
|
1139 | 1139 | self.roots.add(self.startrev) |
|
1140 | 1140 | for parent in realparents(rev): |
|
1141 | 1141 | if parent in self.roots: |
|
1142 | 1142 | self.roots.add(rev) |
|
1143 | 1143 | return True |
|
1144 | 1144 | else: |
|
1145 | 1145 | # backwards: all parents |
|
1146 | 1146 | if not self.roots: |
|
1147 | 1147 | self.roots.update(realparents(self.startrev)) |
|
1148 | 1148 | if rev in self.roots: |
|
1149 | 1149 | self.roots.remove(rev) |
|
1150 | 1150 | self.roots.update(realparents(rev)) |
|
1151 | 1151 | return True |
|
1152 | 1152 | |
|
1153 | 1153 | return False |
|
1154 | 1154 | |
|
1155 | 1155 | # it might be worthwhile to do this in the iterator if the rev range |
|
1156 | 1156 | # is descending and the prune args are all within that range |
|
1157 | 1157 | for rev in opts.get('prune', ()): |
|
1158 | 1158 | rev = repo[rev].rev() |
|
1159 | 1159 | ff = followfilter() |
|
1160 | 1160 | stop = min(revs[0], revs[-1]) |
|
1161 | 1161 | for x in xrange(rev, stop - 1, -1): |
|
1162 | 1162 | if ff.match(x): |
|
1163 | 1163 | wanted.discard(x) |
|
1164 | 1164 | |
|
1165 | 1165 | # Now that wanted is correctly initialized, we can iterate over the |
|
1166 | 1166 | # revision range, yielding only revisions in wanted. |
|
1167 | 1167 | def iterate(): |
|
1168 | 1168 | if follow and not match.files(): |
|
1169 | 1169 | ff = followfilter(onlyfirst=opts.get('follow_first')) |
|
1170 | 1170 | def want(rev): |
|
1171 | 1171 | return ff.match(rev) and rev in wanted |
|
1172 | 1172 | else: |
|
1173 | 1173 | def want(rev): |
|
1174 | 1174 | return rev in wanted |
|
1175 | 1175 | |
|
1176 | 1176 | for i, window in increasing_windows(0, len(revs)): |
|
1177 | 1177 | nrevs = [rev for rev in revs[i:i + window] if want(rev)] |
|
1178 | 1178 | for rev in sorted(nrevs): |
|
1179 | 1179 | fns = fncache.get(rev) |
|
1180 | 1180 | ctx = change(rev) |
|
1181 | 1181 | if not fns: |
|
1182 | 1182 | def fns_generator(): |
|
1183 | 1183 | for f in ctx.files(): |
|
1184 | 1184 | if match(f): |
|
1185 | 1185 | yield f |
|
1186 | 1186 | fns = fns_generator() |
|
1187 | 1187 | prepare(ctx, fns) |
|
1188 | 1188 | for rev in nrevs: |
|
1189 | 1189 | yield change(rev) |
|
1190 | 1190 | return iterate() |
|
1191 | 1191 | |
|
1192 | 1192 | def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly): |
|
1193 | 1193 | join = lambda f: os.path.join(prefix, f) |
|
1194 | 1194 | bad = [] |
|
1195 | 1195 | oldbad = match.bad |
|
1196 | 1196 | match.bad = lambda x, y: bad.append(x) or oldbad(x, y) |
|
1197 | 1197 | names = [] |
|
1198 | 1198 | wctx = repo[None] |
|
1199 | 1199 | cca = None |
|
1200 | 1200 | abort, warn = scmutil.checkportabilityalert(ui) |
|
1201 | 1201 | if abort or warn: |
|
1202 | 1202 | cca = scmutil.casecollisionauditor(ui, abort, wctx) |
|
1203 | 1203 | for f in repo.walk(match): |
|
1204 | 1204 | exact = match.exact(f) |
|
1205 | 1205 | if exact or not explicitonly and f not in repo.dirstate: |
|
1206 | 1206 | if cca: |
|
1207 | 1207 | cca(f) |
|
1208 | 1208 | names.append(f) |
|
1209 | 1209 | if ui.verbose or not exact: |
|
1210 | 1210 | ui.status(_('adding %s\n') % match.rel(join(f))) |
|
1211 | 1211 | |
|
1212 | 1212 | for subpath in wctx.substate: |
|
1213 | 1213 | sub = wctx.sub(subpath) |
|
1214 | 1214 | try: |
|
1215 | 1215 | submatch = matchmod.narrowmatcher(subpath, match) |
|
1216 | 1216 | if listsubrepos: |
|
1217 | 1217 | bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix, |
|
1218 | 1218 | False)) |
|
1219 | 1219 | else: |
|
1220 | 1220 | bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix, |
|
1221 | 1221 | True)) |
|
1222 | 1222 | except error.LookupError: |
|
1223 | 1223 | ui.status(_("skipping missing subrepository: %s\n") |
|
1224 | 1224 | % join(subpath)) |
|
1225 | 1225 | |
|
1226 | 1226 | if not dryrun: |
|
1227 | 1227 | rejected = wctx.add(names, prefix) |
|
1228 | 1228 | bad.extend(f for f in rejected if f in match.files()) |
|
1229 | 1229 | return bad |
|
1230 | 1230 | |
|
1231 | 1231 | def forget(ui, repo, match, prefix, explicitonly): |
|
1232 | 1232 | join = lambda f: os.path.join(prefix, f) |
|
1233 | 1233 | bad = [] |
|
1234 | 1234 | oldbad = match.bad |
|
1235 | 1235 | match.bad = lambda x, y: bad.append(x) or oldbad(x, y) |
|
1236 | 1236 | wctx = repo[None] |
|
1237 | 1237 | forgot = [] |
|
1238 | 1238 | s = repo.status(match=match, clean=True) |
|
1239 | 1239 | forget = sorted(s[0] + s[1] + s[3] + s[6]) |
|
1240 | 1240 | if explicitonly: |
|
1241 | 1241 | forget = [f for f in forget if match.exact(f)] |
|
1242 | 1242 | |
|
1243 | 1243 | for subpath in wctx.substate: |
|
1244 | 1244 | sub = wctx.sub(subpath) |
|
1245 | 1245 | try: |
|
1246 | 1246 | submatch = matchmod.narrowmatcher(subpath, match) |
|
1247 | 1247 | subbad, subforgot = sub.forget(ui, submatch, prefix) |
|
1248 | 1248 | bad.extend([subpath + '/' + f for f in subbad]) |
|
1249 | 1249 | forgot.extend([subpath + '/' + f for f in subforgot]) |
|
1250 | 1250 | except error.LookupError: |
|
1251 | 1251 | ui.status(_("skipping missing subrepository: %s\n") |
|
1252 | 1252 | % join(subpath)) |
|
1253 | 1253 | |
|
1254 | 1254 | if not explicitonly: |
|
1255 | 1255 | for f in match.files(): |
|
1256 | 1256 | if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))): |
|
1257 | 1257 | if f not in forgot: |
|
1258 | 1258 | if os.path.exists(match.rel(join(f))): |
|
1259 | 1259 | ui.warn(_('not removing %s: ' |
|
1260 | 1260 | 'file is already untracked\n') |
|
1261 | 1261 | % match.rel(join(f))) |
|
1262 | 1262 | bad.append(f) |
|
1263 | 1263 | |
|
1264 | 1264 | for f in forget: |
|
1265 | 1265 | if ui.verbose or not match.exact(f): |
|
1266 | 1266 | ui.status(_('removing %s\n') % match.rel(join(f))) |
|
1267 | 1267 | |
|
1268 | 1268 | rejected = wctx.forget(forget, prefix) |
|
1269 | 1269 | bad.extend(f for f in rejected if f in match.files()) |
|
1270 | 1270 | forgot.extend(forget) |
|
1271 | 1271 | return bad, forgot |
|
1272 | 1272 | |
|
1273 | 1273 | def duplicatecopies(repo, rev, p1): |
|
1274 | 1274 | "Reproduce copies found in the source revision in the dirstate for grafts" |
|
1275 | 1275 | for dst, src in copies.pathcopies(repo[p1], repo[rev]).iteritems(): |
|
1276 | 1276 | repo.dirstate.copy(src, dst) |
|
1277 | 1277 | |
|
1278 | 1278 | def commit(ui, repo, commitfunc, pats, opts): |
|
1279 | 1279 | '''commit the specified files or all outstanding changes''' |
|
1280 | 1280 | date = opts.get('date') |
|
1281 | 1281 | if date: |
|
1282 | 1282 | opts['date'] = util.parsedate(date) |
|
1283 | 1283 | message = logmessage(ui, opts) |
|
1284 | 1284 | |
|
1285 | 1285 | # extract addremove carefully -- this function can be called from a command |
|
1286 | 1286 | # that doesn't support addremove |
|
1287 | 1287 | if opts.get('addremove'): |
|
1288 | 1288 | scmutil.addremove(repo, pats, opts) |
|
1289 | 1289 | |
|
1290 | 1290 | return commitfunc(ui, repo, message, |
|
1291 | 1291 | scmutil.match(repo[None], pats, opts), opts) |
|
1292 | 1292 | |
|
1293 | 1293 | def amend(ui, repo, commitfunc, old, extra, pats, opts): |
|
1294 | 1294 | ui.note(_('amending changeset %s\n') % old) |
|
1295 | 1295 | base = old.p1() |
|
1296 | 1296 | |
|
1297 | 1297 | wlock = repo.wlock() |
|
1298 | 1298 | try: |
|
1299 | 1299 | # First, do a regular commit to record all changes in the working |
|
1300 | 1300 | # directory (if there are any) |
|
1301 | 1301 | node = commit(ui, repo, commitfunc, pats, opts) |
|
1302 | 1302 | ctx = repo[node] |
|
1303 | 1303 | |
|
1304 | 1304 | # Participating changesets: |
|
1305 | 1305 | # |
|
1306 | 1306 | # node/ctx o - new (intermediate) commit that contains changes from |
|
1307 | 1307 | # | working dir to go into amending commit (or a workingctx |
|
1308 | 1308 | # | if there were no changes) |
|
1309 | 1309 | # | |
|
1310 | 1310 | # old o - changeset to amend |
|
1311 | 1311 | # | |
|
1312 | 1312 | # base o - parent of amending changeset |
|
1313 | 1313 | |
|
1314 | 1314 | # Update extra dict from amended commit (e.g. to preserve graft source) |
|
1315 | 1315 | extra.update(old.extra()) |
|
1316 | 1316 | |
|
1317 | 1317 | # Also update it from the intermediate commit or from the wctx |
|
1318 | 1318 | extra.update(ctx.extra()) |
|
1319 | 1319 | |
|
1320 | 1320 | files = set(old.files()) |
|
1321 | 1321 | |
|
1322 | 1322 | # Second, we use either the commit we just did, or if there were no |
|
1323 | 1323 | # changes the parent of the working directory as the version of the |
|
1324 | 1324 | # files in the final amend commit |
|
1325 | 1325 | if node: |
|
1326 | 1326 | ui.note(_('copying changeset %s to %s\n') % (ctx, base)) |
|
1327 | 1327 | |
|
1328 | 1328 | user = ctx.user() |
|
1329 | 1329 | date = ctx.date() |
|
1330 | 1330 | message = ctx.description() |
|
1331 | 1331 | # Recompute copies (avoid recording a -> b -> a) |
|
1332 | 1332 | copied = copies.pathcopies(base, ctx) |
|
1333 | 1333 | |
|
1334 | 1334 | # Prune files which were reverted by the updates: if old introduced |
|
1335 | 1335 | # file X and our intermediate commit, node, renamed that file, then |
|
1336 | 1336 | # those two files are the same and we can discard X from our list |
|
1337 | 1337 | # of files. Likewise if X was deleted, it's no longer relevant |
|
1338 | 1338 | files.update(ctx.files()) |
|
1339 | 1339 | |
|
1340 | 1340 | def samefile(f): |
|
1341 | 1341 | if f in ctx.manifest(): |
|
1342 | 1342 | a = ctx.filectx(f) |
|
1343 | 1343 | if f in base.manifest(): |
|
1344 | 1344 | b = base.filectx(f) |
|
1345 | 1345 | return (a.data() == b.data() |
|
1346 | 1346 | and a.flags() == b.flags()) |
|
1347 | 1347 | else: |
|
1348 | 1348 | return False |
|
1349 | 1349 | else: |
|
1350 | 1350 | return f not in base.manifest() |
|
1351 | 1351 | files = [f for f in files if not samefile(f)] |
|
1352 | 1352 | |
|
1353 | 1353 | def filectxfn(repo, ctx_, path): |
|
1354 | 1354 | try: |
|
1355 | 1355 | fctx = ctx[path] |
|
1356 | 1356 | flags = fctx.flags() |
|
1357 | 1357 | mctx = context.memfilectx(fctx.path(), fctx.data(), |
|
1358 | 1358 | islink='l' in flags, |
|
1359 | 1359 | isexec='x' in flags, |
|
1360 | 1360 | copied=copied.get(path)) |
|
1361 | 1361 | return mctx |
|
1362 | 1362 | except KeyError: |
|
1363 | | raise IOError |
| 1363 | raise IOError |
|
1364 | 1364 | else: |
|
1365 | 1365 | ui.note(_('copying changeset %s to %s\n') % (old, base)) |
|
1366 | 1366 | |
|
1367 | 1367 | # Use version of files as in the old cset |
|
1368 | 1368 | def filectxfn(repo, ctx_, path): |
|
1369 | 1369 | try: |
|
1370 | 1370 | return old.filectx(path) |
|
1371 | 1371 | except KeyError: |
|
1372 | | raise IOError |
| 1372 | raise IOError |
|
1373 | 1373 | |
|
1374 | 1374 | # See if we got a message from -m or -l, if not, open the editor |
|
1375 | 1375 | # with the message of the changeset to amend |
|
1376 | 1376 | user = opts.get('user') or old.user() |
|
1377 | 1377 | date = opts.get('date') or old.date() |
|
1378 | 1378 | message = logmessage(ui, opts) |
|
1379 | 1379 | if not message: |
|
1380 | 1380 | cctx = context.workingctx(repo, old.description(), user, date, |
|
1381 | 1381 | extra, |
|
1382 | 1382 | repo.status(base.node(), old.node())) |
|
1383 | 1383 | message = commitforceeditor(repo, cctx, []) |
|
1384 | 1384 | |
|
1385 | 1385 | new = context.memctx(repo, |
|
1386 | 1386 | parents=[base.node(), nullid], |
|
1387 | 1387 | text=message, |
|
1388 | 1388 | files=files, |
|
1389 | 1389 | filectxfn=filectxfn, |
|
1390 | 1390 | user=user, |
|
1391 | 1391 | date=date, |
|
1392 | 1392 | extra=extra) |
|
1393 | 1393 | newid = repo.commitctx(new) |
|
1394 | 1394 | if newid != old.node(): |
|
1395 | 1395 | # Reroute the working copy parent to the new changeset |
|
1396 | 1396 | repo.setparents(newid, nullid) |
|
1397 | 1397 | |
|
1398 | 1398 | # Move bookmarks from old parent to amend commit |
|
1399 | 1399 | bms = repo.nodebookmarks(old.node()) |
|
1400 | 1400 | if bms: |
|
1401 | 1401 | for bm in bms: |
|
1402 | 1402 | repo._bookmarks[bm] = newid |
|
1403 | 1403 | bookmarks.write(repo) |
|
1404 | 1404 | |
|
1405 | 1405 | # Strip the intermediate commit (if there was one) and the amended |
|
1406 | 1406 | # commit |
|
1407 | 1407 | lock = repo.lock() |
|
1408 | 1408 | try: |
|
1409 | 1409 | if node: |
|
1410 | 1410 | ui.note(_('stripping intermediate changeset %s\n') % ctx) |
|
1411 | 1411 | ui.note(_('stripping amended changeset %s\n') % old) |
|
1412 | 1412 | repair.strip(ui, repo, old.node(), topic='amend-backup') |
|
1413 | 1413 | finally: |
|
1414 | 1414 | lock.release() |
|
1415 | 1415 | finally: |
|
1416 | 1416 | wlock.release() |
|
1417 | 1417 | return newid |
|
1418 | 1418 | |
|
1419 | 1419 | def commiteditor(repo, ctx, subs): |
|
1420 | 1420 | if ctx.description(): |
|
1421 | 1421 | return ctx.description() |
|
1422 | 1422 | return commitforceeditor(repo, ctx, subs) |
|
1423 | 1423 | |
|
1424 | 1424 | def commitforceeditor(repo, ctx, subs): |
|
1425 | 1425 | edittext = [] |
|
1426 | 1426 | modified, added, removed = ctx.modified(), ctx.added(), ctx.removed() |
|
1427 | 1427 | if ctx.description(): |
|
1428 | 1428 | edittext.append(ctx.description()) |
|
1429 | 1429 | edittext.append("") |
|
1430 | 1430 | edittext.append("") # Empty line between message and comments. |
|
1431 | 1431 | edittext.append(_("HG: Enter commit message." |
|
1432 | 1432 | " Lines beginning with 'HG:' are removed.")) |
|
1433 | 1433 | edittext.append(_("HG: Leave message empty to abort commit.")) |
|
1434 | 1434 | edittext.append("HG: --") |
|
1435 | 1435 | edittext.append(_("HG: user: %s") % ctx.user()) |
|
1436 | 1436 | if ctx.p2(): |
|
1437 | 1437 | edittext.append(_("HG: branch merge")) |
|
1438 | 1438 | if ctx.branch(): |
|
1439 | 1439 | edittext.append(_("HG: branch '%s'") % ctx.branch()) |
|
1440 | 1440 | edittext.extend([_("HG: subrepo %s") % s for s in subs]) |
|
1441 | 1441 | edittext.extend([_("HG: added %s") % f for f in added]) |
|
1442 | 1442 | edittext.extend([_("HG: changed %s") % f for f in modified]) |
|
1443 | 1443 | edittext.extend([_("HG: removed %s") % f for f in removed]) |
|
1444 | 1444 | if not added and not modified and not removed: |
|
1445 | 1445 | edittext.append(_("HG: no files changed")) |
|
1446 | 1446 | edittext.append("") |
|
1447 | 1447 | # run editor in the repository root |
|
1448 | 1448 | olddir = os.getcwd() |
|
1449 | 1449 | os.chdir(repo.root) |
|
1450 | 1450 | text = repo.ui.edit("\n".join(edittext), ctx.user()) |
|
1451 | 1451 | text = re.sub("(?m)^HG:.*(\n|$)", "", text) |
|
1452 | 1452 | os.chdir(olddir) |
|
1453 | 1453 | |
|
1454 | 1454 | if not text.strip(): |
|
1455 | 1455 | raise util.Abort(_("empty commit message")) |
|
1456 | 1456 | |
|
1457 | 1457 | return text |
|
1458 | 1458 | |
|
1459 | 1459 | def revert(ui, repo, ctx, parents, *pats, **opts): |
|
1460 | 1460 | parent, p2 = parents |
|
1461 | 1461 | node = ctx.node() |
|
1462 | 1462 | |
|
1463 | 1463 | mf = ctx.manifest() |
|
1464 | 1464 | if node == parent: |
|
1465 | 1465 | pmf = mf |
|
1466 | 1466 | else: |
|
1467 | 1467 | pmf = None |
|
1468 | 1468 | |
|
1469 | 1469 | # need all matching names in dirstate and manifest of target rev, |
|
1470 | 1470 | # so have to walk both. do not print errors if files exist in one |
|
1471 | 1471 | # but not other. |
|
1472 | 1472 | |
|
1473 | 1473 | names = {} |
|
1474 | 1474 | |
|
1475 | 1475 | wlock = repo.wlock() |
|
1476 | 1476 | try: |
|
1477 | 1477 | # walk dirstate. |
|
1478 | 1478 | |
|
1479 | 1479 | m = scmutil.match(repo[None], pats, opts) |
|
1480 | 1480 | m.bad = lambda x, y: False |
|
1481 | 1481 | for abs in repo.walk(m): |
|
1482 | 1482 | names[abs] = m.rel(abs), m.exact(abs) |
|
1483 | 1483 | |
|
1484 | 1484 | # walk target manifest. |
|
1485 | 1485 | |
|
1486 | 1486 | def badfn(path, msg): |
|
1487 | 1487 | if path in names: |
|
1488 | 1488 | return |
|
1489 | 1489 | if path in ctx.substate: |
|
1490 | 1490 | return |
|
1491 | 1491 | path_ = path + '/' |
|
1492 | 1492 | for f in names: |
|
1493 | 1493 | if f.startswith(path_): |
|
1494 | 1494 | return |
|
1495 | 1495 | ui.warn("%s: %s\n" % (m.rel(path), msg)) |
|
1496 | 1496 | |
|
1497 | 1497 | m = scmutil.match(ctx, pats, opts) |
|
1498 | 1498 | m.bad = badfn |
|
1499 | 1499 | for abs in ctx.walk(m): |
|
1500 | 1500 | if abs not in names: |
|
1501 | 1501 | names[abs] = m.rel(abs), m.exact(abs) |
|
1502 | 1502 | |
|
1503 | 1503 | # get the list of subrepos that must be reverted |
|
1504 | 1504 | targetsubs = [s for s in ctx.substate if m(s)] |
|
1505 | 1505 | m = scmutil.matchfiles(repo, names) |
|
1506 | 1506 | changes = repo.status(match=m)[:4] |
|
1507 | 1507 | modified, added, removed, deleted = map(set, changes) |
|
1508 | 1508 | |
|
1509 | 1509 | # if f is a rename, also revert the source |
|
1510 | 1510 | cwd = repo.getcwd() |
|
1511 | 1511 | for f in added: |
|
1512 | 1512 | src = repo.dirstate.copied(f) |
|
1513 | 1513 | if src and src not in names and repo.dirstate[src] == 'r': |
|
1514 | 1514 | removed.add(src) |
|
1515 | 1515 | names[src] = (repo.pathto(src, cwd), True) |
|
1516 | 1516 | |
|
1517 | 1517 | def removeforget(abs): |
|
1518 | 1518 | if repo.dirstate[abs] == 'a': |
|
1519 | 1519 | return _('forgetting %s\n') |
|
1520 | 1520 | return _('removing %s\n') |
|
1521 | 1521 | |
|
1522 | 1522 | revert = ([], _('reverting %s\n')) |
|
1523 | 1523 | add = ([], _('adding %s\n')) |
|
1524 | 1524 | remove = ([], removeforget) |
|
1525 | 1525 | undelete = ([], _('undeleting %s\n')) |
|
1526 | 1526 | |
|
1527 | 1527 | disptable = ( |
|
1528 | 1528 | # dispatch table: |
|
1529 | 1529 | # file state |
|
1530 | 1530 | # action if in target manifest |
|
1531 | 1531 | # action if not in target manifest |
|
1532 | 1532 | # make backup if in target manifest |
|
1533 | 1533 | # make backup if not in target manifest |
|
1534 | 1534 | (modified, revert, remove, True, True), |
|
1535 | 1535 | (added, revert, remove, True, False), |
|
1536 | 1536 | (removed, undelete, None, False, False), |
|
1537 | 1537 | (deleted, revert, remove, False, False), |
|
1538 | 1538 | ) |
|
1539 | 1539 | |
|
1540 | 1540 | for abs, (rel, exact) in sorted(names.items()): |
|
1541 | 1541 | mfentry = mf.get(abs) |
|
1542 | 1542 | target = repo.wjoin(abs) |
|
1543 | 1543 | def handle(xlist, dobackup): |
|
1544 | 1544 | xlist[0].append(abs) |
|
1545 | 1545 | if (dobackup and not opts.get('no_backup') and |
|
1546 | 1546 | os.path.lexists(target)): |
|
1547 | 1547 | bakname = "%s.orig" % rel |
|
1548 | 1548 | ui.note(_('saving current version of %s as %s\n') % |
|
1549 | 1549 | (rel, bakname)) |
|
1550 | 1550 | if not opts.get('dry_run'): |
|
1551 | 1551 | util.rename(target, bakname) |
|
1552 | 1552 | if ui.verbose or not exact: |
|
1553 | 1553 | msg = xlist[1] |
|
1554 | 1554 | if not isinstance(msg, basestring): |
|
1555 | 1555 | msg = msg(abs) |
|
1556 | 1556 | ui.status(msg % rel) |
|
1557 | 1557 | for table, hitlist, misslist, backuphit, backupmiss in disptable: |
|
1558 | 1558 | if abs not in table: |
|
1559 | 1559 | continue |
|
1560 | 1560 | # file has changed in dirstate |
|
1561 | 1561 | if mfentry: |
|
1562 | 1562 | handle(hitlist, backuphit) |
|
1563 | 1563 | elif misslist is not None: |
|
1564 | 1564 | handle(misslist, backupmiss) |
|
1565 | 1565 | break |
|
1566 | 1566 | else: |
|
1567 | 1567 | if abs not in repo.dirstate: |
|
1568 | 1568 | if mfentry: |
|
1569 | 1569 | handle(add, True) |
|
1570 | 1570 | elif exact: |
|
1571 | 1571 | ui.warn(_('file not managed: %s\n') % rel) |
|
1572 | 1572 | continue |
|
1573 | 1573 | # file has not changed in dirstate |
|
1574 | 1574 | if node == parent: |
|
1575 | 1575 | if exact: |
|
1576 | 1576 | ui.warn(_('no changes needed to %s\n') % rel) |
|
1577 | 1577 | continue |
|
1578 | 1578 | if pmf is None: |
|
1579 | 1579 | # only need parent manifest in this unlikely case, |
|
1580 | 1580 | # so do not read by default |
|
1581 | 1581 | pmf = repo[parent].manifest() |
|
1582 | 1582 | if abs in pmf and mfentry: |
|
1583 | 1583 | # if version of file is same in parent and target |
|
1584 | 1584 | # manifests, do nothing |
|
1585 | 1585 | if (pmf[abs] != mfentry or |
|
1586 | 1586 | pmf.flags(abs) != mf.flags(abs)): |
|
1587 | 1587 | handle(revert, False) |
|
1588 | 1588 | else: |
|
1589 | 1589 | handle(remove, False) |
|
1590 | 1590 | |
|
1591 | 1591 | if not opts.get('dry_run'): |
|
1592 | 1592 | def checkout(f): |
|
1593 | 1593 | fc = ctx[f] |
|
1594 | 1594 | repo.wwrite(f, fc.data(), fc.flags()) |
|
1595 | 1595 | |
|
1596 | 1596 | audit_path = scmutil.pathauditor(repo.root) |
|
1597 | 1597 | for f in remove[0]: |
|
1598 | 1598 | if repo.dirstate[f] == 'a': |
|
1599 | 1599 | repo.dirstate.drop(f) |
|
1600 | 1600 | continue |
|
1601 | 1601 | audit_path(f) |
|
1602 | 1602 | try: |
|
1603 | 1603 | util.unlinkpath(repo.wjoin(f)) |
|
1604 | 1604 | except OSError: |
|
1605 | 1605 | pass |
|
1606 | 1606 | repo.dirstate.remove(f) |
|
1607 | 1607 | |
|
1608 | 1608 | normal = None |
|
1609 | 1609 | if node == parent: |
|
1610 | 1610 | # We're reverting to our parent. If possible, we'd like status |
|
1611 | 1611 | # to report the file as clean. We have to use normallookup for |
|
1612 | 1612 | # merges to avoid losing information about merged/dirty files. |
|
1613 | 1613 | if p2 != nullid: |
|
1614 | 1614 | normal = repo.dirstate.normallookup |
|
1615 | 1615 | else: |
|
1616 | 1616 | normal = repo.dirstate.normal |
|
1617 | 1617 | for f in revert[0]: |
|
1618 | 1618 | checkout(f) |
|
1619 | 1619 | if normal: |
|
1620 | 1620 | normal(f) |
|
1621 | 1621 | |
|
1622 | 1622 | for f in add[0]: |
|
1623 | 1623 | checkout(f) |
|
1624 | 1624 | repo.dirstate.add(f) |
|
1625 | 1625 | |
|
1626 | 1626 | normal = repo.dirstate.normallookup |
|
1627 | 1627 | if node == parent and p2 == nullid: |
|
1628 | 1628 | normal = repo.dirstate.normal |
|
1629 | 1629 | for f in undelete[0]: |
|
1630 | 1630 | checkout(f) |
|
1631 | 1631 | normal(f) |
|
1632 | 1632 | |
|
1633 | 1633 | if targetsubs: |
|
1634 | 1634 | # Revert the subrepos on the revert list |
|
1635 | 1635 | for sub in targetsubs: |
|
1636 | 1636 | ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts) |
|
1637 | 1637 | finally: |
|
1638 | 1638 | wlock.release() |
|
1639 | 1639 | |
|
1640 | 1640 | def command(table): |
|
1641 | 1641 | '''returns a function object bound to table which can be used as |
|
1642 | 1642 | a decorator for populating table as a command table''' |
|
1643 | 1643 | |
|
1644 | 1644 | def cmd(name, options, synopsis=None): |
|
1645 | 1645 | def decorator(func): |
|
1646 | 1646 | if synopsis: |
|
1647 | 1647 | table[name] = func, options[:], synopsis |
|
1648 | 1648 | else: |
|
1649 | 1649 | table[name] = func, options[:] |
|
1650 | 1650 | return func |
|
1651 | 1651 | return decorator |
|
1652 | 1652 | |
|
1653 | 1653 | return cmd |
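The factory above is what lets extensions declare commands declaratively. A hypothetical usage sketch, assuming this module is Mercurial's cmdutil; the 'hello' command, its option, and the cmdtable name are all made up for illustration:

    from mercurial import cmdutil

    cmdtable = {}
    command = cmdutil.command(cmdtable)   # bind the decorator to our table

    @command('hello', [('g', 'greeting', 'Hi', 'greeting to use')], 'hg hello')
    def hello(ui, repo, **opts):
        # ui and repo are supplied by the dispatcher at call time
        ui.write('%s from %s\n' % (opts['greeting'], repo.root))

    # cmdtable now maps 'hello' -> (hello, [the option tuple], 'hg hello')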
@@ -1,238 +1,238 @@
|
1 | 1 | # commandserver.py - communicate with Mercurial's API over a pipe |
|
2 | 2 | # |
|
3 | 3 | # Copyright Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | import struct |
|
10 | 10 | import sys, os |
|
11 | 11 | import dispatch, encoding, util |
|
12 | 12 | |
|
13 | 13 | logfile = None |
|
14 | 14 | |
|
15 | 15 | def log(*args): |
|
16 | 16 | if not logfile: |
|
17 | 17 | return |
|
18 | 18 | |
|
19 | 19 | for a in args: |
|
20 | 20 | logfile.write(str(a)) |
|
21 | 21 | |
|
22 | 22 | logfile.flush() |
|
23 | 23 | |
|
24 | 24 | class channeledoutput(object): |
|
25 | 25 | """ |
|
26 | 26 | Write data from in_ to out in the following format: |
|
27 | 27 | |
|
28 | 28 | data length (unsigned int), |
|
29 | 29 | data |
|
30 | 30 | """ |
|
31 | 31 | def __init__(self, in_, out, channel): |
|
32 | 32 | self.in_ = in_ |
|
33 | 33 | self.out = out |
|
34 | 34 | self.channel = channel |
|
35 | 35 | |
|
36 | 36 | def write(self, data): |
|
37 | 37 | if not data: |
|
38 | 38 | return |
|
39 | 39 | self.out.write(struct.pack('>cI', self.channel, len(data))) |
|
40 | 40 | self.out.write(data) |
|
41 | 41 | self.out.flush() |
|
42 | 42 | |
|
43 | 43 | def __getattr__(self, attr): |
|
44 | 44 | if attr in ('isatty', 'fileno'): |
|
45 | 45 | raise AttributeError, attr |
|
46 | 46 | return getattr(self.in_, attr) |
|
47 | 47 | |
|
48 | 48 | class channeledinput(object): |
|
49 | 49 | """ |
|
50 | 50 | Read data from in_. |
|
51 | 51 | |
|
52 | 52 | Requests for input are written to out in the following format: |
|
53 | 53 | channel identifier - 'I' for plain input, 'L' line based (1 byte) |
|
54 | 54 | how many bytes to send at most (unsigned int), |
|
55 | 55 | |
|
56 | 56 | The client replies with: |
|
57 | 57 | data length (unsigned int), 0 meaning EOF |
|
58 | 58 | data |
|
59 | 59 | """ |
|
60 | 60 | |
|
61 | 61 | maxchunksize = 4 * 1024 |
|
62 | 62 | |
|
63 | 63 | def __init__(self, in_, out, channel): |
|
64 | 64 | self.in_ = in_ |
|
65 | 65 | self.out = out |
|
66 | 66 | self.channel = channel |
|
67 | 67 | |
|
68 | 68 | def read(self, size=-1): |
|
69 | 69 | if size < 0: |
|
70 | 70 | # if we need to consume all the clients input, ask for 4k chunks |
|
71 | 71 | # so the pipe doesn't fill up risking a deadlock |
|
72 | 72 | size = self.maxchunksize |
|
73 | 73 | s = self._read(size, self.channel) |
|
74 | 74 | buf = s |
|
75 | 75 | while s: |
|
76 | 76 | s = self._read(size, self.channel) |
|
77 | 77 | buf += s |
|
78 | 78 | |
|
79 | 79 | return buf |
|
80 | 80 | else: |
|
81 | 81 | return self._read(size, self.channel) |
|
82 | 82 | |
|
83 | 83 | def _read(self, size, channel): |
|
84 | 84 | if not size: |
|
85 | 85 | return '' |
|
86 | 86 | assert size > 0 |
|
87 | 87 | |
|
88 | 88 | # tell the client we need at most size bytes |
|
89 | 89 | self.out.write(struct.pack('>cI', channel, size)) |
|
90 | 90 | self.out.flush() |
|
91 | 91 | |
|
92 | 92 | length = self.in_.read(4) |
|
93 | 93 | length = struct.unpack('>I', length)[0] |
|
94 | 94 | if not length: |
|
95 | 95 | return '' |
|
96 | 96 | else: |
|
97 | 97 | return self.in_.read(length) |
|
98 | 98 | |
|
99 | 99 | def readline(self, size=-1): |
|
100 | 100 | if size < 0: |
|
101 | 101 | size = self.maxchunksize |
|
102 | 102 | s = self._read(size, 'L') |
|
103 | 103 | buf = s |
|
104 | 104 | # keep asking for more until there's either no more or |
|
105 | 105 | # we got a full line |
|
106 | 106 | while s and s[-1] != '\n': |
|
107 | 107 | s = self._read(size, 'L') |
|
108 | 108 | buf += s |
|
109 | 109 | |
|
110 | 110 | return buf |
|
111 | 111 | else: |
|
112 | 112 | return self._read(size, 'L') |
|
113 | 113 | |
|
114 | 114 | def __iter__(self): |
|
115 | 115 | return self |
|
116 | 116 | |
|
117 | 117 | def next(self): |
|
118 | 118 | l = self.readline() |
|
119 | 119 | if not l: |
|
120 | 120 | raise StopIteration |
|
121 | 121 | return l |
|
122 | 122 | |
|
123 | 123 | def __getattr__(self, attr): |
|
124 | 124 | if attr in ('isatty', 'fileno'): |
|
125 | 125 | raise AttributeError, attr |
|
126 | 126 | return getattr(self.in_, attr) |
|
127 | 127 | |
|
128 | 128 | class server(object): |
|
129 | 129 | """ |
|
130 | 130 | Listens for commands on stdin, runs them and writes the output on a channel |
|
131 | 131 | based stream to stdout. |
|
132 | 132 | """ |
|
133 | 133 | def __init__(self, ui, repo, mode): |
|
134 | 134 | self.cwd = os.getcwd() |
|
135 | 135 | |
|
136 | 136 | logpath = ui.config("cmdserver", "log", None) |
|
137 | 137 | if logpath: |
|
138 | 138 | global logfile |
|
139 | 139 | if logpath == '-': |
|
140 | 140 | # write log on a special 'd'ebug channel |
|
141 | 141 | logfile = channeledoutput(sys.stdout, sys.stdout, 'd') |
|
142 | 142 | else: |
|
143 | 143 | logfile = open(logpath, 'a') |
|
144 | 144 | |
|
145 | 145 | # the ui here is really the repo ui so take its baseui so we don't end |
|
146 | 146 | # up with its local configuration |
|
147 | 147 | self.ui = repo.baseui |
|
148 | 148 | self.repo = repo |
|
149 | 149 | self.repoui = repo.ui |
|
150 | 150 | |
|
151 | 151 | if mode == 'pipe': |
|
152 | 152 | self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e') |
|
153 | 153 | self.cout = channeledoutput(sys.stdout, sys.stdout, 'o') |
|
154 | 154 | self.cin = channeledinput(sys.stdin, sys.stdout, 'I') |
|
155 | 155 | self.cresult = channeledoutput(sys.stdout, sys.stdout, 'r') |
|
156 | 156 | |
|
157 | 157 | self.client = sys.stdin |
|
158 | 158 | else: |
|
159 | 159 | raise util.Abort(_('unknown mode %s') % mode) |
|
160 | 160 | |
|
161 | 161 | def _read(self, size): |
|
162 | 162 | if not size: |
|
163 | 163 | return '' |
|
164 | 164 | |
|
165 | 165 | data = self.client.read(size) |
|
166 | 166 | |
|
167 | 167 | # is the other end closed? |
|
168 | 168 | if not data: |
|
169 | | raise EOFError |
| 169 | raise EOFError |
|
170 | 170 | |
|
171 | 171 | return data |
|
172 | 172 | |
|
173 | 173 | def runcommand(self): |
|
174 | 174 | """ reads a list of \0 terminated arguments, executes |
|
175 | 175 | and writes the return code to the result channel """ |
|
176 | 176 | |
|
177 | 177 | length = struct.unpack('>I', self._read(4))[0] |
|
178 | 178 | if not length: |
|
179 | 179 | args = [] |
|
180 | 180 | else: |
|
181 | 181 | args = self._read(length).split('\0') |
|
182 | 182 | |
|
183 | 183 | # copy the uis so changes (e.g. --config or --verbose) don't |
|
184 | 184 | # persist between requests |
|
185 | 185 | copiedui = self.ui.copy() |
|
186 | 186 | self.repo.baseui = copiedui |
|
187 | 187 | self.repo.ui = self.repo.dirstate._ui = self.repoui.copy() |
|
188 | 188 | self.repo.invalidate() |
|
189 | 189 | self.repo.invalidatedirstate() |
|
190 | 190 | |
|
191 | 191 | req = dispatch.request(args[:], copiedui, self.repo, self.cin, |
|
192 | 192 | self.cout, self.cerr) |
|
193 | 193 | |
|
194 | 194 | ret = dispatch.dispatch(req) or 0 # might return None |
|
195 | 195 | |
|
196 | 196 | # restore old cwd |
|
197 | 197 | if '--cwd' in args: |
|
198 | 198 | os.chdir(self.cwd) |
|
199 | 199 | |
|
200 | 200 | self.cresult.write(struct.pack('>i', int(ret))) |
|
201 | 201 | |
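Seen from the client, invoking runcommand is a newline-terminated command name followed by a length-prefixed, NUL-separated argument list. A sketch, with srv_in a hypothetical file object for the server's stdin:

    import struct

    def sendcommand(srv_in, args):            # e.g. args = ['log', '-l', '5']
        data = '\0'.join(args)
        srv_in.write('runcommand\n')          # dispatched by serveone() below
        srv_in.write(struct.pack('>I', len(data)))
        srv_in.write(data)
        srv_in.flush()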
|
202 | 202 | def getencoding(self): |
|
203 | 203 | """ writes the current encoding to the result channel """ |
|
204 | 204 | self.cresult.write(encoding.encoding) |
|
205 | 205 | |
|
206 | 206 | def serveone(self): |
|
207 | 207 | cmd = self.client.readline()[:-1] |
|
208 | 208 | if cmd: |
|
209 | 209 | handler = self.capabilities.get(cmd) |
|
210 | 210 | if handler: |
|
211 | 211 | handler(self) |
|
212 | 212 | else: |
|
213 | 213 | # clients are expected to check what commands are supported by |
|
214 | 214 | # looking at the servers capabilities |
|
215 | 215 | raise util.Abort(_('unknown command %s') % cmd) |
|
216 | 216 | |
|
217 | 217 | return cmd != '' |
|
218 | 218 | |
|
219 | 219 | capabilities = {'runcommand' : runcommand, |
|
220 | 220 | 'getencoding' : getencoding} |
|
221 | 221 | |
|
222 | 222 | def serve(self): |
|
223 | 223 | hellomsg = 'capabilities: ' + ' '.join(self.capabilities.keys()) |
|
224 | 224 | hellomsg += '\n' |
|
225 | 225 | hellomsg += 'encoding: ' + encoding.encoding |
|
226 | 226 | |
|
227 | 227 | # write the hello msg in -one- chunk |
|
228 | 228 | self.cout.write(hellomsg) |
|
229 | 229 | |
|
230 | 230 | try: |
|
231 | 231 | while self.serveone(): |
|
232 | 232 | pass |
|
233 | 233 | except EOFError: |
|
234 | 234 | # we'll get here if the client disconnected while we were reading |
|
235 | 235 | # its request |
|
236 | 236 | return 1 |
|
237 | 237 | |
|
238 | 238 | return 0 |
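A client's first job is to parse that hello message, which arrives as a single frame on the 'o' channel. A sketch of the parsing, assuming srv_out is a file object reading the server's stdout:

    import struct

    def readhello(srv_out):
        channel, length = struct.unpack('>cI', srv_out.read(5))
        assert channel == 'o'        # hello is written on the output channel
        fields = dict(line.split(': ', 1)
                      for line in srv_out.read(length).splitlines())
        return fields['capabilities'].split(), fields['encoding']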
@@ -1,277 +1,277 @@
|
1 | 1 | # dagutil.py - dag utilities for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2010 Benoit Boissinot <bboissin@gmail.com> |
|
4 | 4 | # and Peter Arrenbrecht <peter@arrenbrecht.ch> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from node import nullrev |
|
10 | 10 | from i18n import _ |
|
11 | 11 | |
|
12 | 12 | |
|
13 | 13 | class basedag(object): |
|
14 | 14 | '''generic interface for DAGs |
|
15 | 15 | |
|
16 | 16 | terms: |
|
17 | 17 | "ix" (short for index) identifies a node internally, |
|
18 | 18 | "id" identifies one externally. |
|
19 | 19 | |
|
20 | 20 | All params are ixs unless explicitly suffixed otherwise. |
|
21 | 21 | Pluralized params are lists or sets. |
|
22 | 22 | ''' |
|
23 | 23 | |
|
24 | 24 | def __init__(self): |
|
25 | 25 | self._inverse = None |
|
26 | 26 | |
|
27 | 27 | def nodeset(self): |
|
28 | 28 | '''set of all node idxs''' |
|
29 | | raise NotImplementedError |
| 29 | raise NotImplementedError |
|
30 | 30 | |
|
31 | 31 | def heads(self): |
|
32 | 32 | '''list of head ixs''' |
|
33 | | raise NotImplementedError |
| 33 | raise NotImplementedError |
|
34 | 34 | |
|
35 | 35 | def parents(self, ix): |
|
36 | 36 | '''list of parents ixs of ix''' |
|
37 | | raise NotImplementedError |
| 37 | raise NotImplementedError |
|
38 | 38 | |
|
39 | 39 | def inverse(self): |
|
40 | 40 | '''inverse DAG, where parents becomes children, etc.''' |
|
41 | | raise NotImplementedError |
| 41 | raise NotImplementedError |
|
42 | 42 | |
|
43 | 43 | def ancestorset(self, starts, stops=None): |
|
44 | 44 | ''' |
|
45 | 45 | set of all ancestors of starts (incl), but stop walk at stops (excl) |
|
46 | 46 | ''' |
|
47 | | raise NotImplementedError |
| 47 | raise NotImplementedError |
|
48 | 48 | |
|
49 | 49 | def descendantset(self, starts, stops=None): |
|
50 | 50 | ''' |
|
51 | 51 | set of all descendants of starts (incl), but stop walk at stops (excl) |
|
52 | 52 | ''' |
|
53 | 53 | return self.inverse().ancestorset(starts, stops) |
|
54 | 54 | |
|
55 | 55 | def headsetofconnecteds(self, ixs): |
|
56 | 56 | ''' |
|
57 | 57 | subset of connected list of ixs so that no node has a descendant in it |
|
58 | 58 | |
|
59 | 59 | By "connected list" we mean that if an ancestor and a descendant are in |
|
60 | 60 | the list, then so is at least one path connecting them. |
|
61 | 61 | ''' |
|
62 | | raise NotImplementedError |
| 62 | raise NotImplementedError |
|
63 | 63 | |
|
64 | 64 | def externalize(self, ix): |
|
65 | 65 | '''return a list of (or set if given a set) of node ids''' |
|
66 | 66 | return self._externalize(ix) |
|
67 | 67 | |
|
68 | 68 | def externalizeall(self, ixs): |
|
69 | 69 | '''return a list of (or set if given a set) of node ids''' |
|
70 | 70 | ids = self._externalizeall(ixs) |
|
71 | 71 | if isinstance(ixs, set): |
|
72 | 72 | return set(ids) |
|
73 | 73 | return list(ids) |
|
74 | 74 | |
|
75 | 75 | def internalize(self, id): |
|
76 | 76 | '''return a list of (or set if given a set) of node ixs''' |
|
77 | 77 | return self._internalize(id) |
|
78 | 78 | |
|
79 | 79 | def internalizeall(self, ids, filterunknown=False): |
|
80 | 80 | '''return a list of (or set if given a set) of node ids''' |
|
81 | 81 | ixs = self._internalizeall(ids, filterunknown) |
|
82 | 82 | if isinstance(ids, set): |
|
83 | 83 | return set(ixs) |
|
84 | 84 | return list(ixs) |
|
85 | 85 | |
|
86 | 86 | |
|
87 | 87 | class genericdag(basedag): |
|
88 | 88 | '''generic implementations for DAGs''' |
|
89 | 89 | |
|
90 | 90 | def ancestorset(self, starts, stops=None): |
|
91 | 91 | stops = stops and set(stops) or set() |
|
92 | 92 | seen = set() |
|
93 | 93 | pending = list(starts) |
|
94 | 94 | while pending: |
|
95 | 95 | n = pending.pop() |
|
96 | 96 | if n not in seen and n not in stops: |
|
97 | 97 | seen.add(n) |
|
98 | 98 | pending.extend(self.parents(n)) |
|
99 | 99 | return seen |
|
100 | 100 | |
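The same walk is easy to check on a toy graph. A sketch with the parents relation as a plain dict rather than a DAG object (the nodes and edges are made up):

    parents = {0: [], 1: [0], 2: [1], 3: [1]}

    def toyancestorset(starts, stops=None):
        stops = stops and set(stops) or set()
        seen, pending = set(), list(starts)
        while pending:
            n = pending.pop()
            if n not in seen and n not in stops:
                seen.add(n)
                pending.extend(parents[n])
        return seen

    print toyancestorset([3])        # set([0, 1, 3]): 3 plus everything above it
    print toyancestorset([3], [1])   # set([3]): the walk stops at (excludes) 1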
|
101 | 101 | def headsetofconnecteds(self, ixs): |
|
102 | 102 | hds = set(ixs) |
|
103 | 103 | if not hds: |
|
104 | 104 | return hds |
|
105 | 105 | for n in ixs: |
|
106 | 106 | for p in self.parents(n): |
|
107 | 107 | hds.discard(p) |
|
108 | 108 | assert hds |
|
109 | 109 | return hds |
|
110 | 110 | |
|
111 | 111 | |
|
112 | 112 | class revlogbaseddag(basedag): |
|
113 | 113 | '''generic dag interface to a revlog''' |
|
114 | 114 | |
|
115 | 115 | def __init__(self, revlog, nodeset): |
|
116 | 116 | basedag.__init__(self) |
|
117 | 117 | self._revlog = revlog |
|
118 | 118 | self._heads = None |
|
119 | 119 | self._nodeset = nodeset |
|
120 | 120 | |
|
121 | 121 | def nodeset(self): |
|
122 | 122 | return self._nodeset |
|
123 | 123 | |
|
124 | 124 | def heads(self): |
|
125 | 125 | if self._heads is None: |
|
126 | 126 | self._heads = self._getheads() |
|
127 | 127 | return self._heads |
|
128 | 128 | |
|
129 | 129 | def _externalize(self, ix): |
|
130 | 130 | return self._revlog.index[ix][7] |
|
131 | 131 | def _externalizeall(self, ixs): |
|
132 | 132 | idx = self._revlog.index |
|
133 | 133 | return [idx[i][7] for i in ixs] |
|
134 | 134 | |
|
135 | 135 | def _internalize(self, id): |
|
136 | 136 | ix = self._revlog.rev(id) |
|
137 | 137 | if ix == nullrev: |
|
138 | 138 | raise LookupError(id, self._revlog.indexfile, _('nullid')) |
|
139 | 139 | return ix |
|
140 | 140 | def _internalizeall(self, ids, filterunknown): |
|
141 | 141 | rl = self._revlog |
|
142 | 142 | if filterunknown: |
|
143 | 143 | return [r for r in map(rl.nodemap.get, ids) |
|
144 | 144 | if r is not None and r != nullrev] |
|
145 | 145 | return map(self._internalize, ids) |
|
146 | 146 | |
|
147 | 147 | |
|
148 | 148 | class revlogdag(revlogbaseddag): |
|
149 | 149 | '''dag interface to a revlog''' |
|
150 | 150 | |
|
151 | 151 | def __init__(self, revlog): |
|
152 | 152 | revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog)))) |
|
153 | 153 | |
|
154 | 154 | def _getheads(self): |
|
155 | 155 | return [r for r in self._revlog.headrevs() if r != nullrev] |
|
156 | 156 | |
|
157 | 157 | def parents(self, ix): |
|
158 | 158 | rlog = self._revlog |
|
159 | 159 | idx = rlog.index |
|
160 | 160 | revdata = idx[ix] |
|
161 | 161 | prev = revdata[5] |
|
162 | 162 | if prev != nullrev: |
|
163 | 163 | prev2 = revdata[6] |
|
164 | 164 | if prev2 == nullrev: |
|
165 | 165 | return [prev] |
|
166 | 166 | return [prev, prev2] |
|
167 | 167 | prev2 = revdata[6] |
|
168 | 168 | if prev2 != nullrev: |
|
169 | 169 | return [prev2] |
|
170 | 170 | return [] |
|
171 | 171 | |
|
172 | 172 | def inverse(self): |
|
173 | 173 | if self._inverse is None: |
|
174 | 174 | self._inverse = inverserevlogdag(self) |
|
175 | 175 | return self._inverse |
|
176 | 176 | |
|
177 | 177 | def ancestorset(self, starts, stops=None): |
|
178 | 178 | rlog = self._revlog |
|
179 | 179 | idx = rlog.index |
|
180 | 180 | stops = stops and set(stops) or set() |
|
181 | 181 | seen = set() |
|
182 | 182 | pending = list(starts) |
|
183 | 183 | while pending: |
|
184 | 184 | rev = pending.pop() |
|
185 | 185 | if rev not in seen and rev not in stops: |
|
186 | 186 | seen.add(rev) |
|
187 | 187 | revdata = idx[rev] |
|
188 | 188 | for i in [5, 6]: |
|
189 | 189 | prev = revdata[i] |
|
190 | 190 | if prev != nullrev: |
|
191 | 191 | pending.append(prev) |
|
192 | 192 | return seen |
|
193 | 193 | |
|
194 | 194 | def headsetofconnecteds(self, ixs): |
|
195 | 195 | if not ixs: |
|
196 | 196 | return set() |
|
197 | 197 | rlog = self._revlog |
|
198 | 198 | idx = rlog.index |
|
199 | 199 | headrevs = set(ixs) |
|
200 | 200 | for rev in ixs: |
|
201 | 201 | revdata = idx[rev] |
|
202 | 202 | for i in [5, 6]: |
|
203 | 203 | prev = revdata[i] |
|
204 | 204 | if prev != nullrev: |
|
205 | 205 | headrevs.discard(prev) |
|
206 | 206 | assert headrevs |
|
207 | 207 | return headrevs |
|
208 | 208 | |
|
209 | 209 | def linearize(self, ixs): |
|
210 | 210 | '''linearize and topologically sort a list of revisions |
|
211 | 211 | |
|
212 | 212 | The linearization process tries to create long runs of revs where |
|
213 | 213 | a child rev comes immediately after its first parent. This is done by |
|
214 | 214 | visiting the heads of the given revs in inverse topological order, |
|
215 | 215 | and for each visited rev, visiting its second parent, then its first |
|
216 | 216 | parent, then adding the rev itself to the output list. |
|
217 | 217 | ''' |
|
218 | 218 | sorted = [] |
|
219 | 219 | visit = list(self.headsetofconnecteds(ixs)) |
|
220 | 220 | visit.sort(reverse=True) |
|
221 | 221 | finished = set() |
|
222 | 222 | |
|
223 | 223 | while visit: |
|
224 | 224 | cur = visit.pop() |
|
225 | 225 | if cur < 0: |
|
226 | 226 | cur = -cur - 1 |
|
227 | 227 | if cur not in finished: |
|
228 | 228 | sorted.append(cur) |
|
229 | 229 | finished.add(cur) |
|
230 | 230 | else: |
|
231 | 231 | visit.append(-cur - 1) |
|
232 | 232 | visit += [p for p in self.parents(cur) |
|
233 | 233 | if p in ixs and p not in finished] |
|
234 | 234 | assert len(sorted) == len(ixs) |
|
235 | 235 | return sorted |
|
236 | 236 | |
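The visit-marker trick above (a rev is pushed once as itself and once negated, and only emitted on the second pop) can be traced on a small diamond. A toy sketch with a hand-built parents dict instead of a revlog:

    parents = {0: [], 1: [0], 2: [0], 3: [1, 2]}   # a diamond: 3 merges 1 and 2

    def toylinearize(ixs):
        out, finished = [], set()
        visit = [3]                      # the lone head of this toy graph
        while visit:
            cur = visit.pop()
            if cur < 0:
                cur = -cur - 1
                if cur not in finished:
                    out.append(cur)
                    finished.add(cur)
            else:
                visit.append(-cur - 1)   # re-visit marker, emitted later
                visit += [p for p in parents[cur]
                          if p in ixs and p not in finished]
        return out

    print toylinearize({0, 1, 2, 3})     # [0, 2, 1, 3]: 3 lands right after 1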
|
237 | 237 | |
|
238 | 238 | class inverserevlogdag(revlogbaseddag, genericdag): |
|
239 | 239 | '''inverse of an existing revlog dag; see revlogdag.inverse()''' |
|
240 | 240 | |
|
241 | 241 | def __init__(self, orig): |
|
242 | 242 | revlogbaseddag.__init__(self, orig._revlog, orig._nodeset) |
|
243 | 243 | self._orig = orig |
|
244 | 244 | self._children = {} |
|
245 | 245 | self._roots = [] |
|
246 | 246 | self._walkfrom = len(self._revlog) - 1 |
|
247 | 247 | |
|
248 | 248 | def _walkto(self, walkto): |
|
249 | 249 | rev = self._walkfrom |
|
250 | 250 | cs = self._children |
|
251 | 251 | roots = self._roots |
|
252 | 252 | idx = self._revlog.index |
|
253 | 253 | while rev >= walkto: |
|
254 | 254 | data = idx[rev] |
|
255 | 255 | isroot = True |
|
256 | 256 | for prev in [data[5], data[6]]: # parent revs |
|
257 | 257 | if prev != nullrev: |
|
258 | 258 | cs.setdefault(prev, []).append(rev) |
|
259 | 259 | isroot = False |
|
260 | 260 | if isroot: |
|
261 | 261 | roots.append(rev) |
|
262 | 262 | rev -= 1 |
|
263 | 263 | self._walkfrom = rev |
|
264 | 264 | |
|
265 | 265 | def _getheads(self): |
|
266 | 266 | self._walkto(nullrev) |
|
267 | 267 | return self._roots |
|
268 | 268 | |
|
269 | 269 | def parents(self, ix): |
|
270 | 270 | if ix is None: |
|
271 | 271 | return [] |
|
272 | 272 | if ix <= self._walkfrom: |
|
273 | 273 | self._walkto(ix) |
|
274 | 274 | return self._children.get(ix, []) |
|
275 | 275 | |
|
276 | 276 | def inverse(self): |
|
277 | 277 | return self._orig |
@@ -1,186 +1,186 @@
|
1 | 1 | # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod |
|
2 | 2 | # |
|
3 | 3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
4 | 4 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | import errno, mimetypes, os |
|
10 | 10 | |
|
11 | 11 | HTTP_OK = 200 |
|
12 | 12 | HTTP_NOT_MODIFIED = 304 |
|
13 | 13 | HTTP_BAD_REQUEST = 400 |
|
14 | 14 | HTTP_UNAUTHORIZED = 401 |
|
15 | 15 | HTTP_FORBIDDEN = 403 |
|
16 | 16 | HTTP_NOT_FOUND = 404 |
|
17 | 17 | HTTP_METHOD_NOT_ALLOWED = 405 |
|
18 | 18 | HTTP_SERVER_ERROR = 500 |
|
19 | 19 | |
|
20 | 20 | |
|
21 | 21 | def checkauthz(hgweb, req, op): |
|
22 | 22 | '''Check permission for operation based on request data (including |
|
23 | 23 | authentication info). Return if op allowed, else raise an ErrorResponse |
|
24 | 24 | exception.''' |
|
25 | 25 | |
|
26 | 26 | user = req.env.get('REMOTE_USER') |
|
27 | 27 | |
|
28 | 28 | deny_read = hgweb.configlist('web', 'deny_read') |
|
29 | 29 | if deny_read and (not user or deny_read == ['*'] or user in deny_read): |
|
30 | 30 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') |
|
31 | 31 | |
|
32 | 32 | allow_read = hgweb.configlist('web', 'allow_read') |
|
33 | 33 | result = (not allow_read) or (allow_read == ['*']) |
|
34 | 34 | if not (result or user in allow_read): |
|
35 | 35 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') |
|
36 | 36 | |
|
37 | 37 | if op == 'pull' and not hgweb.allowpull: |
|
38 | 38 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized') |
|
39 | 39 | elif op == 'pull' or op is None: # op is None for interface requests |
|
40 | 40 | return |
|
41 | 41 | |
|
42 | 42 | # enforce that you can only push using POST requests |
|
43 | 43 | if req.env['REQUEST_METHOD'] != 'POST': |
|
44 | 44 | msg = 'push requires POST request' |
|
45 | 45 | raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg) |
|
46 | 46 | |
|
47 | 47 | # require ssl by default for pushing, auth info cannot be sniffed |
|
48 | 48 | # and replayed |
|
49 | 49 | scheme = req.env.get('wsgi.url_scheme') |
|
50 | 50 | if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https': |
|
51 | 51 | raise ErrorResponse(HTTP_OK, 'ssl required') |
|
52 | 52 | |
|
53 | 53 | deny = hgweb.configlist('web', 'deny_push') |
|
54 | 54 | if deny and (not user or deny == ['*'] or user in deny): |
|
55 | 55 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') |
|
56 | 56 | |
|
57 | 57 | allow = hgweb.configlist('web', 'allow_push') |
|
58 | 58 | result = allow and (allow == ['*'] or user in allow) |
|
59 | 59 | if not result: |
|
60 | 60 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') |
|
61 | 61 | |
|
62 | 62 | # Hooks for hgweb permission checks; extensions can add hooks here. |
|
63 | 63 | # Each hook is invoked like this: hook(hgweb, request, operation), |
|
64 | 64 | # where operation is either read, pull or push. Hooks should either |
|
65 | 65 | # raise an ErrorResponse exception, or just return. |
|
66 | 66 | # |
|
67 | 67 | # It is possible to do both authentication and authorization through |
|
68 | 68 | # this. |
|
69 | 69 | permhooks = [checkauthz] |
|
70 | 70 | |
|
71 | 71 | |
|
72 | 72 | class ErrorResponse(Exception): |
|
73 | 73 | def __init__(self, code, message=None, headers=[]): |
|
74 | 74 | if message is None: |
|
75 | 75 | message = _statusmessage(code) |
|
76 | 76 | Exception.__init__(self) |
|
77 | 77 | self.code = code |
|
78 | 78 | self.message = message |
|
79 | 79 | self.headers = headers |
|
80 | 80 | def __str__(self): |
|
81 | 81 | return self.message |
|
82 | 82 | |
|
83 | 83 | class continuereader(object): |
|
84 | 84 | def __init__(self, f, write): |
|
85 | 85 | self.f = f |
|
86 | 86 | self._write = write |
|
87 | 87 | self.continued = False |
|
88 | 88 | |
|
89 | 89 | def read(self, amt=-1): |
|
90 | 90 | if not self.continued: |
|
91 | 91 | self.continued = True |
|
92 | 92 | self._write('HTTP/1.1 100 Continue\r\n\r\n') |
|
93 | 93 | return self.f.read(amt) |
|
94 | 94 | |
|
95 | 95 | def __getattr__(self, attr): |
|
96 | 96 | if attr in ('close', 'readline', 'readlines', '__iter__'): |
|
97 | 97 | return getattr(self.f, attr) |
|
98 | | raise AttributeError |
| 98 | raise AttributeError |
|
99 | 99 | |
|
100 | 100 | def _statusmessage(code): |
|
101 | 101 | from BaseHTTPServer import BaseHTTPRequestHandler |
|
102 | 102 | responses = BaseHTTPRequestHandler.responses |
|
103 | 103 | return responses.get(code, ('Error', 'Unknown error'))[0] |
|
104 | 104 | |
|
105 | 105 | def statusmessage(code, message=None): |
|
106 | 106 | return '%d %s' % (code, message or _statusmessage(code)) |
|
107 | 107 | |
|
108 | 108 | def get_stat(spath): |
|
109 | 109 | """stat changelog if it exists, spath otherwise""" |
|
110 | 110 | cl_path = os.path.join(spath, "00changelog.i") |
|
111 | 111 | if os.path.exists(cl_path): |
|
112 | 112 | return os.stat(cl_path) |
|
113 | 113 | else: |
|
114 | 114 | return os.stat(spath) |
|
115 | 115 | |
|
116 | 116 | def get_mtime(spath): |
|
117 | 117 | return get_stat(spath).st_mtime |
|
118 | 118 | |
|
119 | 119 | def staticfile(directory, fname, req): |
|
120 | 120 | """return a file inside directory with guessed Content-Type header |
|
121 | 121 | |
|
122 | 122 | fname always uses '/' as directory separator and isn't allowed to |
|
123 | 123 | contain unusual path components. |
|
124 | 124 | Content-Type is guessed using the mimetypes module. |
|
125 | 125 | Return an empty string if fname is illegal or file not found. |
|
126 | 126 | |
|
127 | 127 | """ |
|
128 | 128 | parts = fname.split('/') |
|
129 | 129 | for part in parts: |
|
130 | 130 | if (part in ('', os.curdir, os.pardir) or |
|
131 | 131 | os.sep in part or os.altsep is not None and os.altsep in part): |
|
132 | 132 | return "" |
|
133 | 133 | fpath = os.path.join(*parts) |
|
134 | 134 | if isinstance(directory, str): |
|
135 | 135 | directory = [directory] |
|
136 | 136 | for d in directory: |
|
137 | 137 | path = os.path.join(d, fpath) |
|
138 | 138 | if os.path.exists(path): |
|
139 | 139 | break |
|
140 | 140 | try: |
|
141 | 141 | os.stat(path) |
|
142 | 142 | ct = mimetypes.guess_type(path)[0] or "text/plain" |
|
143 | 143 | req.respond(HTTP_OK, ct, length = os.path.getsize(path)) |
|
144 | 144 | fp = open(path, 'rb') |
|
145 | 145 | data = fp.read() |
|
146 | 146 | fp.close() |
|
147 | 147 | return data |
|
148 | 148 | except TypeError: |
|
149 | 149 | raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename') |
|
150 | 150 | except OSError, err: |
|
151 | 151 | if err.errno == errno.ENOENT: |
|
152 | 152 | raise ErrorResponse(HTTP_NOT_FOUND) |
|
153 | 153 | else: |
|
154 | 154 | raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror) |
|
155 | 155 | |
|
156 | 156 | def paritygen(stripecount, offset=0): |
|
157 | 157 | """count parity of horizontal stripes for easier reading""" |
|
158 | 158 | if stripecount and offset: |
|
159 | 159 | # account for offset, e.g. due to building the list in reverse |
|
160 | 160 | count = (stripecount + offset) % stripecount |
|
161 | 161 | parity = (stripecount + offset) / stripecount & 1 |
|
162 | 162 | else: |
|
163 | 163 | count = 0 |
|
164 | 164 | parity = 0 |
|
165 | 165 | while True: |
|
166 | 166 | yield parity |
|
167 | 167 | count += 1 |
|
168 | 168 | if stripecount and count >= stripecount: |
|
169 | 169 | parity = 1 - parity |
|
170 | 170 | count = 0 |
|
171 | 171 | |
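Taking a few values from the generator shows the striping. A quick sketch, assuming the paritygen defined above is in scope:

    from itertools import islice

    print list(islice(paritygen(3), 8))             # [0, 0, 0, 1, 1, 1, 0, 0]
    # an offset shifts the pattern, e.g. for lists built in reverse:
    print list(islice(paritygen(3, offset=4), 4))   # [0, 0, 1, 1]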
|
172 | 172 | def get_contact(config): |
|
173 | 173 | """Return repo contact information or empty string. |
|
174 | 174 | |
|
175 | 175 | web.contact is the primary source, but if that is not set, try |
|
176 | 176 | ui.username or $EMAIL as a fallback to display something useful. |
|
177 | 177 | """ |
|
178 | 178 | return (config("web", "contact") or |
|
179 | 179 | config("ui", "username") or |
|
180 | 180 | os.environ.get("EMAIL") or "") |
|
181 | 181 | |
|
182 | 182 | def caching(web, req): |
|
183 | 183 | tag = str(web.mtime) |
|
184 | 184 | if req.env.get('HTTP_IF_NONE_MATCH') == tag: |
|
185 | 185 | raise ErrorResponse(HTTP_NOT_MODIFIED) |
|
186 | 186 | req.headers.append(('ETag', tag)) |
@@ -1,764 +1,764 @@
|
1 | 1 | # This library is free software; you can redistribute it and/or |
|
2 | 2 | # modify it under the terms of the GNU Lesser General Public |
|
3 | 3 | # License as published by the Free Software Foundation; either |
|
4 | 4 | # version 2.1 of the License, or (at your option) any later version. |
|
5 | 5 | # |
|
6 | 6 | # This library is distributed in the hope that it will be useful, |
|
7 | 7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | 8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
9 | 9 | # Lesser General Public License for more details. |
|
10 | 10 | # |
|
11 | 11 | # You should have received a copy of the GNU Lesser General Public |
|
12 | 12 | # License along with this library; if not, see |
|
13 | 13 | # <http://www.gnu.org/licenses/>. |
|
14 | 14 | |
|
15 | 15 | # This file is part of urlgrabber, a high-level cross-protocol url-grabber |
|
16 | 16 | # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko |
|
17 | 17 | |
|
18 | 18 | # Modified by Benoit Boissinot: |
|
19 | 19 | # - fix for digest auth (inspired from urllib2.py @ Python v2.4) |
|
20 | 20 | # Modified by Dirkjan Ochtman: |
|
21 | 21 | # - import md5 function from a local util module |
|
22 | 22 | # Modified by Martin Geisler: |
|
23 | 23 | # - moved md5 function from local util module to this module |
|
24 | 24 | # Modified by Augie Fackler: |
|
25 | 25 | # - add safesend method and use it to prevent broken pipe errors |
|
26 | 26 | # on large POST requests |
|
27 | 27 | |
|
28 | 28 | """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive. |
|
29 | 29 | |
|
30 | 30 | >>> import urllib2 |
|
31 | 31 | >>> from keepalive import HTTPHandler |
|
32 | 32 | >>> keepalive_handler = HTTPHandler() |
|
33 | 33 | >>> opener = urllib2.build_opener(keepalive_handler) |
|
34 | 34 | >>> urllib2.install_opener(opener) |
|
35 | 35 | >>> |
|
36 | 36 | >>> fo = urllib2.urlopen('http://www.python.org') |
|
37 | 37 | |
|
38 | 38 | If a connection to a given host is requested, and all of the existing |
|
39 | 39 | connections are still in use, another connection will be opened. If |
|
40 | 40 | the handler tries to use an existing connection but it fails in some |
|
41 | 41 | way, it will be closed and removed from the pool. |
|
42 | 42 | |
|
43 | 43 | To remove the handler, simply re-run build_opener with no arguments, and |
|
44 | 44 | install that opener. |
|
45 | 45 | |
|
46 | 46 | You can explicitly close connections by using the close_connection() |
|
47 | 47 | method of the returned file-like object (described below) or you can |
|
48 | 48 | use the handler methods: |
|
49 | 49 | |
|
50 | 50 | close_connection(host) |
|
51 | 51 | close_all() |
|
52 | 52 | open_connections() |
|
53 | 53 | |
|
54 | 54 | NOTE: using the close_connection and close_all methods of the handler |
|
55 | 55 | should be done with care when using multiple threads. |
|
56 | 56 | * there is nothing that prevents another thread from creating new |
|
57 | 57 | connections immediately after connections are closed |
|
58 | 58 | * no checks are done to prevent in-use connections from being closed |
|
59 | 59 | |
|
60 | 60 | >>> keepalive_handler.close_all() |
|
61 | 61 | |
|
62 | 62 | EXTRA ATTRIBUTES AND METHODS |
|
63 | 63 | |
|
64 | 64 | Upon a status of 200, the object returned has a few additional |
|
65 | 65 | attributes and methods, which should not be used if you want to |
|
66 | 66 | remain consistent with the normal urllib2-returned objects: |
|
67 | 67 | |
|
68 | 68 | close_connection() - close the connection to the host |
|
69 | 69 | readlines() - you know, readlines() |
|
70 | 70 | status - the return status (ie 404) |
|
71 | 71 | reason - english translation of status (ie 'File not found') |
|
72 | 72 | |
|
73 | 73 | If you want the best of both worlds, use this inside an |
|
74 | 74 | AttributeError-catching try: |
|
75 | 75 | |
|
76 | 76 | >>> try: status = fo.status |
|
77 | 77 | >>> except AttributeError: status = None |
|
78 | 78 | |
|
79 | 79 | Unfortunately, these are ONLY there if status == 200, so it's not |
|
80 | 80 | easy to distinguish between non-200 responses. The reason is that |
|
81 | 81 | urllib2 tries to do clever things with error codes 301, 302, 401, |
|
82 | 82 | and 407, and it wraps the object upon return. |
|
83 | 83 | |
|
84 | 84 | For python versions earlier than 2.4, you can avoid this fancy error |
|
85 | 85 | handling by setting the module-level global HANDLE_ERRORS to zero. |
|
86 | 86 | You see, prior to 2.4, it's the HTTP Handler's job to determine what |
|
87 | 87 | to handle specially, and what to just pass up. HANDLE_ERRORS == 0 |
|
88 | 88 | means "pass everything up". In python 2.4, however, this job no |
|
89 | 89 | longer belongs to the HTTP Handler and is now done by a NEW handler, |
|
90 | 90 | HTTPErrorProcessor. Here's the bottom line: |
|
91 | 91 | |
|
92 | 92 | python version < 2.4 |
|
93 | 93 | HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as |
|
94 | 94 | errors |
|
95 | 95 | HANDLE_ERRORS == 0 pass everything up, error processing is |
|
96 | 96 | left to the calling code |
|
97 | 97 | python version >= 2.4 |
|
98 | 98 | HANDLE_ERRORS == 1 pass up 200, treat the rest as errors |
|
99 | 99 | HANDLE_ERRORS == 0 (default) pass everything up, let the |
|
100 | 100 | other handlers (specifically, |
|
101 | 101 | HTTPErrorProcessor) decide what to do |
|
102 | 102 | |
|
103 | 103 | In practice, setting the variable either way makes little difference |
|
104 | 104 | in python 2.4, so for the most consistent behavior across versions, |
|
105 | 105 | you probably just want to use the defaults, which will give you |
|
106 | 106 | exceptions on errors. |
|
107 | 107 | |
|
108 | 108 | """ |
|
109 | 109 | |
|
110 | 110 | # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $ |
|
111 | 111 | |
|
112 | 112 | import errno |
|
113 | 113 | import httplib |
|
114 | 114 | import socket |
|
115 | 115 | import thread |
|
116 | 116 | import urllib2 |
|
117 | 117 | |
|
118 | 118 | DEBUG = None |
|
119 | 119 | |
|
120 | 120 | import sys |
|
121 | 121 | if sys.version_info < (2, 4): |
|
122 | 122 | HANDLE_ERRORS = 1 |
|
123 | 123 | else: HANDLE_ERRORS = 0 |
|
124 | 124 | |
|
125 | 125 | class ConnectionManager(object): |
|
126 | 126 | """ |
|
127 | 127 | The connection manager must be able to: |
|
128 | 128 | * keep track of all existing |
|
129 | 129 | """ |
|
130 | 130 | def __init__(self): |
|
131 | 131 | self._lock = thread.allocate_lock() |
|
132 | 132 | self._hostmap = {} # map hosts to a list of connections |
|
133 | 133 | self._connmap = {} # map connections to host |
|
134 | 134 | self._readymap = {} # map connection to ready state |
|
135 | 135 | |
|
136 | 136 | def add(self, host, connection, ready): |
|
137 | 137 | self._lock.acquire() |
|
138 | 138 | try: |
|
139 | 139 | if host not in self._hostmap: |
|
140 | 140 | self._hostmap[host] = [] |
|
141 | 141 | self._hostmap[host].append(connection) |
|
142 | 142 | self._connmap[connection] = host |
|
143 | 143 | self._readymap[connection] = ready |
|
144 | 144 | finally: |
|
145 | 145 | self._lock.release() |
|
146 | 146 | |
|
147 | 147 | def remove(self, connection): |
|
148 | 148 | self._lock.acquire() |
|
149 | 149 | try: |
|
150 | 150 | try: |
|
151 | 151 | host = self._connmap[connection] |
|
152 | 152 | except KeyError: |
|
153 | 153 | pass |
|
154 | 154 | else: |
|
155 | 155 | del self._connmap[connection] |
|
156 | 156 | del self._readymap[connection] |
|
157 | 157 | self._hostmap[host].remove(connection) |
|
158 | 158 | if not self._hostmap[host]: del self._hostmap[host] |
|
159 | 159 | finally: |
|
160 | 160 | self._lock.release() |
|
161 | 161 | |
|
162 | 162 | def set_ready(self, connection, ready): |
|
163 | 163 | try: |
|
164 | 164 | self._readymap[connection] = ready |
|
165 | 165 | except KeyError: |
|
166 | 166 | pass |
|
167 | 167 | |
|
168 | 168 | def get_ready_conn(self, host): |
|
169 | 169 | conn = None |
|
170 | 170 | self._lock.acquire() |
|
171 | 171 | try: |
|
172 | 172 | if host in self._hostmap: |
|
173 | 173 | for c in self._hostmap[host]: |
|
174 | 174 | if self._readymap[c]: |
|
175 | 175 | self._readymap[c] = 0 |
|
176 | 176 | conn = c |
|
177 | 177 | break |
|
178 | 178 | finally: |
|
179 | 179 | self._lock.release() |
|
180 | 180 | return conn |
|
181 | 181 | |
|
182 | 182 | def get_all(self, host=None): |
|
183 | 183 | if host: |
|
184 | 184 | return list(self._hostmap.get(host, [])) |
|
185 | 185 | else: |
|
186 | 186 | return dict(self._hostmap) |
|
187 | 187 | |
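
Together the three maps form a small thread-safe pool. A hedged usage
sketch, where conn is any httplib.HTTPConnection-like object:

    cm = ConnectionManager()
    cm.add('example.com:80', conn, ready=1)    # pool a fresh connection
    c = cm.get_ready_conn('example.com:80')    # returns conn, marks it busy
    cm.set_ready(c, 1)                         # request finished, re-pool it
    cm.remove(c)                               # or retire it entirely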
|
188 | 188 | class KeepAliveHandler(object): |
|
189 | 189 | def __init__(self): |
|
190 | 190 | self._cm = ConnectionManager() |
|
191 | 191 | |
|
192 | 192 | #### Connection Management |
|
193 | 193 | def open_connections(self): |
|
194 | 194 | """return a list of connected hosts and the number of connections |
|
195 | 195 | to each. [('foo.com:80', 2), ('bar.org', 1)]""" |
|
196 | 196 | return [(host, len(li)) for (host, li) in self._cm.get_all().items()] |
|
197 | 197 | |
|
198 | 198 | def close_connection(self, host): |
|
199 | 199 | """close connection(s) to <host> |
|
200 | 200 | host is the host:port spec, as in 'www.cnn.com:8080' as passed in. |
|
201 | 201 | no error occurs if there is no connection to that host.""" |
|
202 | 202 | for h in self._cm.get_all(host): |
|
203 | 203 | self._cm.remove(h) |
|
204 | 204 | h.close() |
|
205 | 205 | |
|
206 | 206 | def close_all(self): |
|
207 | 207 | """close all open connections""" |
|
208 | 208 | for host, conns in self._cm.get_all().iteritems(): |
|
209 | 209 | for h in conns: |
|
210 | 210 | self._cm.remove(h) |
|
211 | 211 | h.close() |
|
212 | 212 | |
|
213 | 213 | def _request_closed(self, request, host, connection): |
|
214 | 214 | """tells us that this request is now closed and the the |
|
215 | 215 | connection is ready for another request""" |
|
216 | 216 | self._cm.set_ready(connection, 1) |
|
217 | 217 | |
|
218 | 218 | def _remove_connection(self, host, connection, close=0): |
|
219 | 219 | if close: |
|
220 | 220 | connection.close() |
|
221 | 221 | self._cm.remove(connection) |
|
222 | 222 | |
|
223 | 223 | #### Transaction Execution |
|
224 | 224 | def http_open(self, req): |
|
225 | 225 | return self.do_open(HTTPConnection, req) |
|
226 | 226 | |
|
227 | 227 | def do_open(self, http_class, req): |
|
228 | 228 | host = req.get_host() |
|
229 | 229 | if not host: |
|
230 | 230 | raise urllib2.URLError('no host given') |
|
231 | 231 | |
|
232 | 232 | try: |
|
233 | 233 | h = self._cm.get_ready_conn(host) |
|
234 | 234 | while h: |
|
235 | 235 | r = self._reuse_connection(h, req, host) |
|
236 | 236 | |
|
237 | 237 | # if this response is non-None, then it worked and we're |
|
238 | 238 | # done. Break out, skipping the else block. |
|
239 | 239 | if r: |
|
240 | 240 | break |
|
241 | 241 | |
|
242 | 242 | # connection is bad - possibly closed by server |
|
243 | 243 | # discard it and ask for the next free connection |
|
244 | 244 | h.close() |
|
245 | 245 | self._cm.remove(h) |
|
246 | 246 | h = self._cm.get_ready_conn(host) |
|
247 | 247 | else: |
|
248 | 248 | # no (working) free connections were found. Create a new one. |
|
249 | 249 | h = http_class(host) |
|
250 | 250 | if DEBUG: |
|
251 | 251 | DEBUG.info("creating new connection to %s (%d)", |
|
252 | 252 | host, id(h)) |
|
253 | 253 | self._cm.add(host, h, 0) |
|
254 | 254 | self._start_transaction(h, req) |
|
255 | 255 | r = h.getresponse() |
|
256 | 256 | except (socket.error, httplib.HTTPException), err: |
|
257 | 257 | raise urllib2.URLError(err) |
|
258 | 258 | |
|
259 | 259 | # if not a persistent connection, don't try to reuse it |
|
260 | 260 | if r.will_close: |
|
261 | 261 | self._cm.remove(h) |
|
262 | 262 | |
|
263 | 263 | if DEBUG: |
|
264 | 264 | DEBUG.info("STATUS: %s, %s", r.status, r.reason) |
|
265 | 265 | r._handler = self |
|
266 | 266 | r._host = host |
|
267 | 267 | r._url = req.get_full_url() |
|
268 | 268 | r._connection = h |
|
269 | 269 | r.code = r.status |
|
270 | 270 | r.headers = r.msg |
|
271 | 271 | r.msg = r.reason |
|
272 | 272 | |
|
273 | 273 | if r.status == 200 or not HANDLE_ERRORS: |
|
274 | 274 | return r |
|
275 | 275 | else: |
|
276 | 276 | return self.parent.error('http', req, r, |
|
277 | 277 | r.status, r.msg, r.headers) |
|
278 | 278 | |
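
The reuse loop in do_open leans on Python's loop-else: the else clause runs
only when the while exits without break, i.e. when no pooled connection
could be reused. Schematically (the helper names are stand-ins):

    while h:
        if reuse_worked(h):       # _reuse_connection returned a response
            break                 # success skips the else clause
        h = next_pooled_conn()
    else:
        h = open_new_conn()       # reached only when the pool is exhausted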
|
279 | 279 | def _reuse_connection(self, h, req, host): |
|
280 | 280 | """start the transaction with a re-used connection |
|
281 | 281 | return a response object (r) upon success or None on failure. |
|
282 | 282 | This DOES not close or remove bad connections in cases where |
|
283 | 283 | it returns. However, if an unexpected exception occurs, it |
|
284 | 284 | will close and remove the connection before re-raising. |
|
285 | 285 | """ |
|
286 | 286 | try: |
|
287 | 287 | self._start_transaction(h, req) |
|
288 | 288 | r = h.getresponse() |
|
289 | 289 | # note: just because we got something back doesn't mean it |
|
290 | 290 | # worked. We'll check the version below, too. |
|
291 | 291 | except (socket.error, httplib.HTTPException): |
|
292 | 292 | r = None |
|
293 | 293 | except: |
|
294 | 294 | # adding this block just in case we've missed |
|
295 | 295 | # something we will still raise the exception, but |
|
296 | 296 | # lets try and close the connection and remove it |
|
297 | 297 | # first. We previously got into a nasty loop |
|
298 | 298 | # where an exception was uncaught, and so the |
|
299 | 299 | # connection stayed open. On the next try, the |
|
300 | 300 | # same exception was raised, etc. The tradeoff is |
|
301 | 301 | # that it's now possible this call will raise |
|
302 | 302 | # a DIFFERENT exception |
|
303 | 303 | if DEBUG: |
|
304 | 304 | DEBUG.error("unexpected exception - closing " |
|
305 | 305 | "connection to %s (%d)", host, id(h)) |
|
306 | 306 | self._cm.remove(h) |
|
307 | 307 | h.close() |
|
308 | 308 | raise |
|
309 | 309 | |
|
310 | 310 | if r is None or r.version == 9: |
|
311 | 311 | # httplib falls back to assuming HTTP 0.9 if it gets a |
|
312 | 312 | # bad header back. This is most likely to happen if |
|
313 | 313 | # the socket has been closed by the server since we |
|
314 | 314 | # last used the connection. |
|
315 | 315 | if DEBUG: |
|
316 | 316 | DEBUG.info("failed to re-use connection to %s (%d)", |
|
317 | 317 | host, id(h)) |
|
318 | 318 | r = None |
|
319 | 319 | else: |
|
320 | 320 | if DEBUG: |
|
321 | 321 | DEBUG.info("re-using connection to %s (%d)", host, id(h)) |
|
322 | 322 | |
|
323 | 323 | return r |
|
324 | 324 | |
|
325 | 325 | def _start_transaction(self, h, req): |
|
326 | 326 | # What follows mostly reimplements HTTPConnection.request() |
|
327 | 327 | # except it adds self.parent.addheaders in the mix. |
|
328 | 328 | headers = req.headers.copy() |
|
329 | 329 | if sys.version_info >= (2, 4): |
|
330 | 330 | headers.update(req.unredirected_hdrs) |
|
331 | 331 | headers.update(self.parent.addheaders) |
|
332 | 332 | headers = dict((n.lower(), v) for n, v in headers.items()) |
|
333 | 333 | skipheaders = {} |
|
334 | 334 | for n in ('host', 'accept-encoding'): |
|
335 | 335 | if n in headers: |
|
336 | 336 | skipheaders['skip_' + n.replace('-', '_')] = 1 |
|
337 | 337 | try: |
|
338 | 338 | if req.has_data(): |
|
339 | 339 | data = req.get_data() |
|
340 | 340 | h.putrequest('POST', req.get_selector(), **skipheaders) |
|
341 | 341 | if 'content-type' not in headers: |
|
342 | 342 | h.putheader('Content-type', |
|
343 | 343 | 'application/x-www-form-urlencoded') |
|
344 | 344 | if 'content-length' not in headers: |
|
345 | 345 | h.putheader('Content-length', '%d' % len(data)) |
|
346 | 346 | else: |
|
347 | 347 | h.putrequest('GET', req.get_selector(), **skipheaders) |
|
348 | 348 | except (socket.error), err: |
|
349 | 349 | raise urllib2.URLError(err) |
|
350 | 350 | for k, v in headers.items(): |
|
351 | 351 | h.putheader(k, v) |
|
352 | 352 | h.endheaders() |
|
353 | 353 | if req.has_data(): |
|
354 | 354 | h.send(data) |
|
355 | 355 | |
|
356 | 356 | class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler): |
|
357 | 357 | pass |
|
358 | 358 | |
|
359 | 359 | class HTTPResponse(httplib.HTTPResponse): |
|
360 | 360 | # we need to subclass HTTPResponse in order to |
|
361 | 361 | # 1) add readline() and readlines() methods |
|
362 | 362 | # 2) add close_connection() methods |
|
363 | 363 | # 3) add info() and geturl() methods |
|
364 | 364 | |
|
365 | 365 | # in order to add readline(), read must be modified to deal with a |
|
366 | 366 | # buffer. example: readline must read a buffer and then spit back |
|
367 | 367 | # one line at a time. The only real alternative is to read one |
|
368 | 368 | # BYTE at a time (ick). Once something has been read, it can't be |
|
369 | 369 | # put back (ok, maybe it can, but that's even uglier than this), |
|
370 | 370 | # so if you THEN do a normal read, you must first take stuff from |
|
371 | 371 | # the buffer. |
|
372 | 372 | |
|
373 | 373 | # the read method wraps the original to accommodate buffering,
|
374 | 374 | # although read() never adds to the buffer. |
|
375 | 375 | # Both readline and readlines have been stolen with almost no |
|
376 | 376 | # modification from socket.py |
|
377 | 377 | |
|
378 | 378 | |
|
379 | 379 | def __init__(self, sock, debuglevel=0, strict=0, method=None): |
|
380 | 380 | if method: # the httplib in python 2.3 uses the method arg |
|
381 | 381 | httplib.HTTPResponse.__init__(self, sock, debuglevel, method) |
|
382 | 382 | else: # 2.2 doesn't |
|
383 | 383 | httplib.HTTPResponse.__init__(self, sock, debuglevel) |
|
384 | 384 | self.fileno = sock.fileno |
|
385 | 385 | self.code = None |
|
386 | 386 | self._rbuf = '' |
|
387 | 387 | self._rbufsize = 8096 |
|
388 | 388 | self._handler = None # inserted by the handler later |
|
389 | 389 | self._host = None # (same) |
|
390 | 390 | self._url = None # (same) |
|
391 | 391 | self._connection = None # (same) |
|
392 | 392 | |
|
393 | 393 | _raw_read = httplib.HTTPResponse.read |
|
394 | 394 | |
|
395 | 395 | def close(self): |
|
396 | 396 | if self.fp: |
|
397 | 397 | self.fp.close() |
|
398 | 398 | self.fp = None |
|
399 | 399 | if self._handler: |
|
400 | 400 | self._handler._request_closed(self, self._host, |
|
401 | 401 | self._connection) |
|
402 | 402 | |
|
403 | 403 | def close_connection(self): |
|
404 | 404 | self._handler._remove_connection(self._host, self._connection, close=1) |
|
405 | 405 | self.close() |
|
406 | 406 | |
|
407 | 407 | def info(self): |
|
408 | 408 | return self.headers |
|
409 | 409 | |
|
410 | 410 | def geturl(self): |
|
411 | 411 | return self._url |
|
412 | 412 | |
|
413 | 413 | def read(self, amt=None): |
|
414 | 414 | # the _rbuf test is only in this first if for speed. It's not |
|
415 | 415 | # logically necessary |
|
416 | 416 | if self._rbuf and not amt is None: |
|
417 | 417 | L = len(self._rbuf) |
|
418 | 418 | if amt > L: |
|
419 | 419 | amt -= L |
|
420 | 420 | else: |
|
421 | 421 | s = self._rbuf[:amt] |
|
422 | 422 | self._rbuf = self._rbuf[amt:] |
|
423 | 423 | return s |
|
424 | 424 | |
|
425 | 425 | s = self._rbuf + self._raw_read(amt) |
|
426 | 426 | self._rbuf = '' |
|
427 | 427 | return s |
|
428 | 428 | |
|
429 | 429 | # stolen from Python SVN #68532 to fix issue1088 |
|
430 | 430 | def _read_chunked(self, amt): |
|
431 | 431 | chunk_left = self.chunk_left |
|
432 | 432 | value = '' |
|
433 | 433 | |
|
434 | 434 | # XXX This accumulates chunks by repeated string concatenation, |
|
435 | 435 | # which is not efficient as the number or size of chunks gets big. |
|
436 | 436 | while True: |
|
437 | 437 | if chunk_left is None: |
|
438 | 438 | line = self.fp.readline() |
|
439 | 439 | i = line.find(';') |
|
440 | 440 | if i >= 0: |
|
441 | 441 | line = line[:i] # strip chunk-extensions |
|
442 | 442 | try: |
|
443 | 443 | chunk_left = int(line, 16) |
|
444 | 444 | except ValueError: |
|
445 | 445 | # close the connection as protocol synchronisation is |
|
446 | 446 | # probably lost |
|
447 | 447 | self.close() |
|
448 | 448 | raise httplib.IncompleteRead(value) |
|
449 | 449 | if chunk_left == 0: |
|
450 | 450 | break |
|
451 | 451 | if amt is None: |
|
452 | 452 | value += self._safe_read(chunk_left) |
|
453 | 453 | elif amt < chunk_left: |
|
454 | 454 | value += self._safe_read(amt) |
|
455 | 455 | self.chunk_left = chunk_left - amt |
|
456 | 456 | return value |
|
457 | 457 | elif amt == chunk_left: |
|
458 | 458 | value += self._safe_read(amt) |
|
459 | 459 | self._safe_read(2) # toss the CRLF at the end of the chunk |
|
460 | 460 | self.chunk_left = None |
|
461 | 461 | return value |
|
462 | 462 | else: |
|
463 | 463 | value += self._safe_read(chunk_left) |
|
464 | 464 | amt -= chunk_left |
|
465 | 465 | |
|
466 | 466 | # we read the whole chunk, get another |
|
467 | 467 | self._safe_read(2) # toss the CRLF at the end of the chunk |
|
468 | 468 | chunk_left = None |
|
469 | 469 | |
|
470 | 470 | # read and discard trailer up to the CRLF terminator |
|
471 | 471 | ### note: we shouldn't have any trailers! |
|
472 | 472 | while True: |
|
473 | 473 | line = self.fp.readline() |
|
474 | 474 | if not line: |
|
475 | 475 | # a vanishingly small number of sites EOF without |
|
476 | 476 | # sending the trailer |
|
477 | 477 | break |
|
478 | 478 | if line == '\r\n': |
|
479 | 479 | break |
|
480 | 480 | |
|
481 | 481 | # we read everything; close the "file" |
|
482 | 482 | self.close() |
|
483 | 483 | |
|
484 | 484 | return value |
|
485 | 485 | |
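
The wire format decoded above is '<hex size>[;extensions]\r\n<data>\r\n'
repeated, terminated by a zero-size chunk. A self-contained sketch of the
same parse:

    import cStringIO
    fp = cStringIO.StringIO('3\r\nfoo\r\n4\r\nbars\r\n0\r\n\r\n')
    body = ''
    while True:
        size = int(fp.readline().split(';')[0], 16)  # strip chunk-extensions
        if size == 0:
            break
        body += fp.read(size)
        fp.readline()            # toss the CRLF at the end of the chunk
    assert body == 'foobars'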
|
486 | 486 | def readline(self, limit=-1): |
|
487 | 487 | i = self._rbuf.find('\n') |
|
488 | 488 | while i < 0 and not (0 < limit <= len(self._rbuf)): |
|
489 | 489 | new = self._raw_read(self._rbufsize) |
|
490 | 490 | if not new: |
|
491 | 491 | break |
|
492 | 492 | i = new.find('\n') |
|
493 | 493 | if i >= 0: |
|
494 | 494 | i = i + len(self._rbuf) |
|
495 | 495 | self._rbuf = self._rbuf + new |
|
496 | 496 | if i < 0: |
|
497 | 497 | i = len(self._rbuf) |
|
498 | 498 | else: |
|
499 | 499 | i = i + 1 |
|
500 | 500 | if 0 <= limit < len(self._rbuf): |
|
501 | 501 | i = limit |
|
502 | 502 | data, self._rbuf = self._rbuf[:i], self._rbuf[i:] |
|
503 | 503 | return data |
|
504 | 504 | |
|
505 | 505 | def readlines(self, sizehint = 0): |
|
506 | 506 | total = 0 |
|
507 | 507 | list = [] |
|
508 | 508 | while True: |
|
509 | 509 | line = self.readline() |
|
510 | 510 | if not line: |
|
511 | 511 | break |
|
512 | 512 | list.append(line) |
|
513 | 513 | total += len(line) |
|
514 | 514 | if sizehint and total >= sizehint: |
|
515 | 515 | break |
|
516 | 516 | return list |
|
517 | 517 | |
|
518 | 518 | def safesend(self, str): |
|
519 | 519 | """Send `str' to the server. |
|
520 | 520 | |
|
521 | 521 | Shamelessly ripped off from httplib to patch a bad behavior. |
|
522 | 522 | """ |
|
523 | 523 | # _broken_pipe_resp is an attribute we set in this function |
|
524 | 524 | # if the socket is closed while we're sending data but |
|
525 | 525 | # the server sent us a response before hanging up. |
|
526 | 526 | # In that case, we want to pretend to send the rest of the |
|
527 | 527 | # outgoing data, and then let the user use getresponse() |
|
528 | 528 | # (which we wrap) to get this last response before |
|
529 | 529 | # opening a new socket. |
|
530 | 530 | if getattr(self, '_broken_pipe_resp', None) is not None: |
|
531 | 531 | return |
|
532 | 532 | |
|
533 | 533 | if self.sock is None: |
|
534 | 534 | if self.auto_open: |
|
535 | 535 | self.connect() |
|
536 | 536 | else: |
|
537 |     | raise httplib.NotConnected()

    | 537 | raise httplib.NotConnected
|
538 | 538 | |
|
539 | 539 | # send the data to the server. if we get a broken pipe, then close |
|
540 | 540 | # the socket. we want to reconnect when somebody tries to send again. |
|
541 | 541 | # |
|
542 | 542 | # NOTE: we DO propagate the error, though, because we cannot simply |
|
543 | 543 | # ignore the error... the caller will know if they can retry. |
|
544 | 544 | if self.debuglevel > 0: |
|
545 | 545 | print "send:", repr(str) |
|
546 | 546 | try: |
|
547 | 547 | blocksize = 8192 |
|
548 | 548 | read = getattr(str, 'read', None) |
|
549 | 549 | if read is not None: |
|
550 | 550 | if self.debuglevel > 0: |
|
551 | 551 | print "sending a read()able"
|
552 | 552 | data = read(blocksize) |
|
553 | 553 | while data: |
|
554 | 554 | self.sock.sendall(data) |
|
555 | 555 | data = read(blocksize) |
|
556 | 556 | else: |
|
557 | 557 | self.sock.sendall(str) |
|
558 | 558 | except socket.error, v: |
|
559 | 559 | reraise = True |
|
560 | 560 | if v[0] == errno.EPIPE: # Broken pipe |
|
561 | 561 | if self._HTTPConnection__state == httplib._CS_REQ_SENT: |
|
562 | 562 | self._broken_pipe_resp = None |
|
563 | 563 | self._broken_pipe_resp = self.getresponse() |
|
564 | 564 | reraise = False |
|
565 | 565 | self.close() |
|
566 | 566 | if reraise: |
|
567 | 567 | raise |
|
568 | 568 | |
|
569 | 569 | def wrapgetresponse(cls): |
|
570 | 570 | """Wraps getresponse in cls with a broken-pipe sane version. |
|
571 | 571 | """ |
|
572 | 572 | def safegetresponse(self): |
|
573 | 573 | # In safesend() we might set the _broken_pipe_resp |
|
574 | 574 | # attribute, in which case the socket has already |
|
575 | 575 | # been closed and we just need to give them the response |
|
576 | 576 | # back. Otherwise, we use the normal response path. |
|
577 | 577 | r = getattr(self, '_broken_pipe_resp', None) |
|
578 | 578 | if r is not None: |
|
579 | 579 | return r |
|
580 | 580 | return cls.getresponse(self) |
|
581 | 581 | safegetresponse.__doc__ = cls.getresponse.__doc__ |
|
582 | 582 | return safegetresponse |
|
583 | 583 | |
|
584 | 584 | class HTTPConnection(httplib.HTTPConnection): |
|
585 | 585 | # use the modified response class |
|
586 | 586 | response_class = HTTPResponse |
|
587 | 587 | send = safesend |
|
588 | 588 | getresponse = wrapgetresponse(httplib.HTTPConnection) |
|
589 | 589 | |
|
590 | 590 | |
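
The safesend/safegetresponse pairing above is what makes large POSTs safe:
if the server answers and hangs up mid-upload, safesend stashes the early
reply and getresponse hands it back instead of dying on EPIPE. A hedged
sketch (host, path and body are placeholders):

    conn = HTTPConnection('hg.example.org')
    conn.request('POST', '/repo', body)   # send() is safesend here
    resp = conn.getresponse()             # early response, if one was stashed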
|
591 | 591 | ######################################################################### |
|
592 | 592 | ##### TEST FUNCTIONS |
|
593 | 593 | ######################################################################### |
|
594 | 594 | |
|
595 | 595 | def error_handler(url): |
|
596 | 596 | global HANDLE_ERRORS |
|
597 | 597 | orig = HANDLE_ERRORS |
|
598 | 598 | keepalive_handler = HTTPHandler() |
|
599 | 599 | opener = urllib2.build_opener(keepalive_handler) |
|
600 | 600 | urllib2.install_opener(opener) |
|
601 | 601 | pos = {0: 'off', 1: 'on'} |
|
602 | 602 | for i in (0, 1): |
|
603 | 603 | print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i) |
|
604 | 604 | HANDLE_ERRORS = i |
|
605 | 605 | try: |
|
606 | 606 | fo = urllib2.urlopen(url) |
|
607 | 607 | fo.read() |
|
608 | 608 | fo.close() |
|
609 | 609 | try: |
|
610 | 610 | status, reason = fo.status, fo.reason |
|
611 | 611 | except AttributeError: |
|
612 | 612 | status, reason = None, None |
|
613 | 613 | except IOError, e: |
|
614 | 614 | print " EXCEPTION: %s" % e |
|
615 | 615 | raise |
|
616 | 616 | else: |
|
617 | 617 | print " status = %s, reason = %s" % (status, reason) |
|
618 | 618 | HANDLE_ERRORS = orig |
|
619 | 619 | hosts = keepalive_handler.open_connections() |
|
620 | 620 | print "open connections:", hosts |
|
621 | 621 | keepalive_handler.close_all() |
|
622 | 622 | |
|
623 | 623 | def md5(s): |
|
624 | 624 | try: |
|
625 | 625 | from hashlib import md5 as _md5 |
|
626 | 626 | except ImportError: |
|
627 | 627 | from md5 import md5 as _md5 |
|
628 | 628 | global md5 |
|
629 | 629 | md5 = _md5 |
|
630 | 630 | return _md5(s) |
|
631 | 631 | |
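
md5() above is a lazy-import shim: the first call picks hashlib (or the old
md5 module on ancient Pythons), rebinds the module-level name to the real
constructor, and later calls go straight through:

    md5('abc').hexdigest()   # first call imports and replaces the global
    md5('abc').hexdigest()   # by now 'md5' is hashlib.md5 itself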
|
632 | 632 | def continuity(url): |
|
633 | 633 | format = '%25s: %s' |
|
634 | 634 | |
|
635 | 635 | # first fetch the file with the normal http handler |
|
636 | 636 | opener = urllib2.build_opener() |
|
637 | 637 | urllib2.install_opener(opener) |
|
638 | 638 | fo = urllib2.urlopen(url) |
|
639 | 639 | foo = fo.read() |
|
640 | 640 | fo.close() |
|
641 | 641 | m = md5(foo)
|
642 | 642 | print format % ('normal urllib', m.hexdigest()) |
|
643 | 643 | |
|
644 | 644 | # now install the keepalive handler and try again |
|
645 | 645 | opener = urllib2.build_opener(HTTPHandler()) |
|
646 | 646 | urllib2.install_opener(opener) |
|
647 | 647 | |
|
648 | 648 | fo = urllib2.urlopen(url) |
|
649 | 649 | foo = fo.read() |
|
650 | 650 | fo.close() |
|
651 | 651 | m = md5(foo)
|
652 | 652 | print format % ('keepalive read', m.hexdigest()) |
|
653 | 653 | |
|
654 | 654 | fo = urllib2.urlopen(url) |
|
655 | 655 | foo = '' |
|
656 | 656 | while True: |
|
657 | 657 | f = fo.readline() |
|
658 | 658 | if f: |
|
659 | 659 | foo = foo + f |
|
660 | 660 | else: break |
|
661 | 661 | fo.close() |
|
662 | 662 | m = md5(foo)
|
663 | 663 | print format % ('keepalive readline', m.hexdigest()) |
|
664 | 664 | |
|
665 | 665 | def comp(N, url): |
|
666 | 666 | print ' making %i connections to:\n %s' % (N, url) |
|
667 | 667 | |
|
668 | 668 | sys.stdout.write(' first using the normal urllib handlers') |
|
669 | 669 | # first use normal opener |
|
670 | 670 | opener = urllib2.build_opener() |
|
671 | 671 | urllib2.install_opener(opener) |
|
672 | 672 | t1 = fetch(N, url) |
|
673 | 673 | print ' TIME: %.3f s' % t1 |
|
674 | 674 | |
|
675 | 675 | sys.stdout.write(' now using the keepalive handler ') |
|
676 | 676 | # now install the keepalive handler and try again |
|
677 | 677 | opener = urllib2.build_opener(HTTPHandler()) |
|
678 | 678 | urllib2.install_opener(opener) |
|
679 | 679 | t2 = fetch(N, url) |
|
680 | 680 | print ' TIME: %.3f s' % t2 |
|
681 | 681 | print ' improvement factor: %.2f' % (t1 / t2) |
|
682 | 682 | |
|
683 | 683 | def fetch(N, url, delay=0): |
|
684 | 684 | import time |
|
685 | 685 | lens = [] |
|
686 | 686 | starttime = time.time() |
|
687 | 687 | for i in range(N): |
|
688 | 688 | if delay and i > 0: |
|
689 | 689 | time.sleep(delay) |
|
690 | 690 | fo = urllib2.urlopen(url) |
|
691 | 691 | foo = fo.read() |
|
692 | 692 | fo.close() |
|
693 | 693 | lens.append(len(foo)) |
|
694 | 694 | diff = time.time() - starttime |
|
695 | 695 | |
|
696 | 696 | j = 0 |
|
697 | 697 | for i in lens[1:]: |
|
698 | 698 | j = j + 1 |
|
699 | 699 | if not i == lens[0]: |
|
700 | 700 | print "WARNING: inconsistent length on read %i: %i" % (j, i) |
|
701 | 701 | |
|
702 | 702 | return diff |
|
703 | 703 | |
|
704 | 704 | def test_timeout(url): |
|
705 | 705 | global DEBUG |
|
706 | 706 | dbbackup = DEBUG |
|
707 | 707 | class FakeLogger(object): |
|
708 | 708 | def debug(self, msg, *args): |
|
709 | 709 | print msg % args |
|
710 | 710 | info = warning = error = debug |
|
711 | 711 | DEBUG = FakeLogger() |
|
712 | 712 | print " fetching the file to establish a connection" |
|
713 | 713 | fo = urllib2.urlopen(url) |
|
714 | 714 | data1 = fo.read() |
|
715 | 715 | fo.close() |
|
716 | 716 | |
|
717 | 717 | i = 20 |
|
718 | 718 | print " waiting %i seconds for the server to close the connection" % i |
|
719 | 719 | while i > 0: |
|
720 | 720 | sys.stdout.write('\r %2i' % i) |
|
721 | 721 | sys.stdout.flush() |
|
722 | 722 | time.sleep(1) |
|
723 | 723 | i -= 1 |
|
724 | 724 | sys.stderr.write('\r') |
|
725 | 725 | |
|
726 | 726 | print " fetching the file a second time" |
|
727 | 727 | fo = urllib2.urlopen(url) |
|
728 | 728 | data2 = fo.read() |
|
729 | 729 | fo.close() |
|
730 | 730 | |
|
731 | 731 | if data1 == data2: |
|
732 | 732 | print ' data are identical' |
|
733 | 733 | else: |
|
734 | 734 | print ' ERROR: DATA DIFFER' |
|
735 | 735 | |
|
736 | 736 | DEBUG = dbbackup |
|
737 | 737 | |
|
738 | 738 | |
|
739 | 739 | def test(url, N=10): |
|
740 | 740 | print "checking error handler (do this on a non-200)"
|
741 | 741 | try: error_handler(url) |
|
742 | 742 | except IOError: |
|
743 | 743 | print "exiting - exception will prevent further tests" |
|
744 | 744 | sys.exit() |
|
745 | 745 | |
|
746 | 746 | print "performing continuity test (making sure stuff isn't corrupted)" |
|
747 | 747 | continuity(url) |
|
748 | 748 | |
|
749 | 749 | print "performing speed comparison" |
|
750 | 750 | comp(N, url) |
|
751 | 751 | |
|
752 | 752 | print "performing dropped-connection check" |
|
753 | 753 | test_timeout(url) |
|
754 | 754 | |
|
755 | 755 | if __name__ == '__main__': |
|
756 | 756 | import time |
|
757 | 757 | import sys |
|
758 | 758 | try: |
|
759 | 759 | N = int(sys.argv[1]) |
|
760 | 760 | url = sys.argv[2] |
|
761 | 761 | except: |
|
762 | 762 | print "%s <integer> <url>" % sys.argv[0] |
|
763 | 763 | else: |
|
764 | 764 | test(url, N) |
@@ -1,341 +1,341 | |||
|
1 | 1 | # match.py - filename matching |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import re |
|
9 | 9 | import scmutil, util, fileset |
|
10 | 10 | from i18n import _ |
|
11 | 11 | |
|
12 | 12 | def _expandsets(pats, ctx): |
|
13 | 13 | '''convert set: patterns into a list of files in the given context''' |
|
14 | 14 | fset = set() |
|
15 | 15 | other = [] |
|
16 | 16 | |
|
17 | 17 | for kind, expr in pats: |
|
18 | 18 | if kind == 'set': |
|
19 | 19 | if not ctx: |
|
20 | 20 | raise util.Abort("fileset expression with no context") |
|
21 | 21 | s = fileset.getfileset(ctx, expr) |
|
22 | 22 | fset.update(s) |
|
23 | 23 | continue |
|
24 | 24 | other.append((kind, expr)) |
|
25 | 25 | return fset, other |
|
26 | 26 | |
|
27 | 27 | class match(object): |
|
28 | 28 | def __init__(self, root, cwd, patterns, include=[], exclude=[], |
|
29 | 29 | default='glob', exact=False, auditor=None, ctx=None): |
|
30 | 30 | """build an object to match a set of file patterns |
|
31 | 31 | |
|
32 | 32 | arguments: |
|
33 | 33 | root - the canonical root of the tree you're matching against |
|
34 | 34 | cwd - the current working directory, if relevant |
|
35 | 35 | patterns - patterns to find |
|
36 | 36 | include - patterns to include |
|
37 | 37 | exclude - patterns to exclude |
|
38 | 38 | default - if a pattern in names has no explicit type, assume this one |
|
39 | 39 | exact - patterns are actually literals |
|
40 | 40 | |
|
41 | 41 | a pattern is one of: |
|
42 | 42 | 'glob:<glob>' - a glob relative to cwd |
|
43 | 43 | 're:<regexp>' - a regular expression |
|
44 | 44 | 'path:<path>' - a path relative to canonroot |
|
45 | 45 | 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs) |
|
46 | 46 | 'relpath:<path>' - a path relative to cwd |
|
47 | 47 | 'relre:<regexp>' - a regexp that needn't match the start of a name |
|
48 | 48 | 'set:<fileset>' - a fileset expression |
|
49 | 49 | '<something>' - a pattern of the specified default type |
|
50 | 50 | """ |
|
51 | 51 | |
|
52 | 52 | self._root = root |
|
53 | 53 | self._cwd = cwd |
|
54 | 54 | self._files = [] |
|
55 | 55 | self._anypats = bool(include or exclude) |
|
56 | 56 | self._ctx = ctx |
|
57 | 57 | |
|
58 | 58 | if include: |
|
59 | 59 | pats = _normalize(include, 'glob', root, cwd, auditor) |
|
60 | 60 | self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)') |
|
61 | 61 | if exclude: |
|
62 | 62 | pats = _normalize(exclude, 'glob', root, cwd, auditor) |
|
63 | 63 | self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)') |
|
64 | 64 | if exact: |
|
65 | 65 | self._files = patterns |
|
66 | 66 | pm = self.exact |
|
67 | 67 | elif patterns: |
|
68 | 68 | pats = _normalize(patterns, default, root, cwd, auditor) |
|
69 | 69 | self._files = _roots(pats) |
|
70 | 70 | self._anypats = self._anypats or _anypats(pats) |
|
71 | 71 | self.patternspat, pm = _buildmatch(ctx, pats, '$') |
|
72 | 72 | |
|
73 | 73 | if patterns or exact: |
|
74 | 74 | if include: |
|
75 | 75 | if exclude: |
|
76 | 76 | m = lambda f: im(f) and not em(f) and pm(f) |
|
77 | 77 | else: |
|
78 | 78 | m = lambda f: im(f) and pm(f) |
|
79 | 79 | else: |
|
80 | 80 | if exclude: |
|
81 | 81 | m = lambda f: not em(f) and pm(f) |
|
82 | 82 | else: |
|
83 | 83 | m = pm |
|
84 | 84 | else: |
|
85 | 85 | if include: |
|
86 | 86 | if exclude: |
|
87 | 87 | m = lambda f: im(f) and not em(f) |
|
88 | 88 | else: |
|
89 | 89 | m = im |
|
90 | 90 | else: |
|
91 | 91 | if exclude: |
|
92 | 92 | m = lambda f: not em(f) |
|
93 | 93 | else: |
|
94 | 94 | m = lambda f: True |
|
95 | 95 | |
|
96 | 96 | self.matchfn = m |
|
97 | 97 | self._fmap = set(self._files) |
|
98 | 98 | |
|
99 | 99 | def __call__(self, fn): |
|
100 | 100 | return self.matchfn(fn) |
|
101 | 101 | def __iter__(self): |
|
102 | 102 | for f in self._files: |
|
103 | 103 | yield f |
|
104 | 104 | def bad(self, f, msg): |
|
105 | 105 | '''callback for each explicit file that can't be |
|
106 | 106 | found/accessed, with an error message |
|
107 | 107 | ''' |
|
108 | 108 | pass |
|
109 | 109 | def dir(self, f): |
|
110 | 110 | pass |
|
111 | 111 | def missing(self, f): |
|
112 | 112 | pass |
|
113 | 113 | def exact(self, f): |
|
114 | 114 | return f in self._fmap |
|
115 | 115 | def rel(self, f): |
|
116 | 116 | return util.pathto(self._root, self._cwd, f) |
|
117 | 117 | def files(self): |
|
118 | 118 | return self._files |
|
119 | 119 | def anypats(self): |
|
120 | 120 | return self._anypats |
|
121 | 121 | def always(self): |
|
122 | 122 | return False |
|
123 | 123 | |
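
A hedged sketch of the matcher in use; the paths and patterns are made up,
and the constructor normalizes patterns against root and cwd:

    m = match('/repo', '/repo', ['glob:*.py'], exclude=['path:tests'])
    m('setup.py')        # True: matches the top-level glob
    m('tests/run.py')    # False: the glob is rooted and 'tests' is excluded
    m.files()            # non-glob roots for the walker, here ['.']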
|
124 | 124 | class exact(match): |
|
125 | 125 | def __init__(self, root, cwd, files): |
|
126 | 126 | match.__init__(self, root, cwd, files, exact = True) |
|
127 | 127 | |
|
128 | 128 | class always(match): |
|
129 | 129 | def __init__(self, root, cwd): |
|
130 | 130 | match.__init__(self, root, cwd, []) |
|
131 | 131 | def always(self): |
|
132 | 132 | return True |
|
133 | 133 | |
|
134 | 134 | class narrowmatcher(match): |
|
135 | 135 | """Adapt a matcher to work on a subdirectory only. |
|
136 | 136 | |
|
137 | 137 | The paths are remapped to remove/insert the path as needed: |
|
138 | 138 | |
|
139 | 139 | >>> m1 = match('root', '', ['a.txt', 'sub/b.txt']) |
|
140 | 140 | >>> m2 = narrowmatcher('sub', m1) |
|
141 | 141 | >>> bool(m2('a.txt')) |
|
142 | 142 | False |
|
143 | 143 | >>> bool(m2('b.txt')) |
|
144 | 144 | True |
|
145 | 145 | >>> bool(m2.matchfn('a.txt')) |
|
146 | 146 | False |
|
147 | 147 | >>> bool(m2.matchfn('b.txt')) |
|
148 | 148 | True |
|
149 | 149 | >>> m2.files() |
|
150 | 150 | ['b.txt'] |
|
151 | 151 | >>> m2.exact('b.txt') |
|
152 | 152 | True |
|
153 | 153 | >>> m2.rel('b.txt') |
|
154 | 154 | 'b.txt' |
|
155 | 155 | >>> def bad(f, msg): |
|
156 | 156 | ... print "%s: %s" % (f, msg) |
|
157 | 157 | >>> m1.bad = bad |
|
158 | 158 | >>> m2.bad('x.txt', 'No such file') |
|
159 | 159 | sub/x.txt: No such file |
|
160 | 160 | """ |
|
161 | 161 | |
|
162 | 162 | def __init__(self, path, matcher): |
|
163 | 163 | self._root = matcher._root |
|
164 | 164 | self._cwd = matcher._cwd |
|
165 | 165 | self._path = path |
|
166 | 166 | self._matcher = matcher |
|
167 | 167 | |
|
168 | 168 | self._files = [f[len(path) + 1:] for f in matcher._files |
|
169 | 169 | if f.startswith(path + "/")] |
|
170 | 170 | self._anypats = matcher._anypats |
|
171 | 171 | self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn) |
|
172 | 172 | self._fmap = set(self._files) |
|
173 | 173 | |
|
174 | 174 | def bad(self, f, msg): |
|
175 | 175 | self._matcher.bad(self._path + "/" + f, msg) |
|
176 | 176 | |
|
177 | 177 | def patkind(pat): |
|
178 | 178 | return _patsplit(pat, None)[0] |
|
179 | 179 | |
|
180 | 180 | def _patsplit(pat, default): |
|
181 | 181 | """Split a string into an optional pattern kind prefix and the |
|
182 | 182 | actual pattern.""" |
|
183 | 183 | if ':' in pat: |
|
184 | 184 | kind, val = pat.split(':', 1) |
|
185 | 185 | if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre', |
|
186 | 186 | 'listfile', 'listfile0', 'set'): |
|
187 | 187 | return kind, val |
|
188 | 188 | return default, pat |
|
189 | 189 | |
|
190 | 190 | def _globre(pat): |
|
191 | 191 | "convert a glob pattern into a regexp" |
|
192 | 192 | i, n = 0, len(pat) |
|
193 | 193 | res = '' |
|
194 | 194 | group = 0 |
|
195 | 195 | escape = re.escape |
|
196 | 196 | def peek(): |
|
197 | 197 | return i < n and pat[i] |
|
198 | 198 | while i < n: |
|
199 | 199 | c = pat[i] |
|
200 | 200 | i += 1 |
|
201 | 201 | if c not in '*?[{},\\': |
|
202 | 202 | res += escape(c) |
|
203 | 203 | elif c == '*': |
|
204 | 204 | if peek() == '*': |
|
205 | 205 | i += 1 |
|
206 | 206 | res += '.*' |
|
207 | 207 | else: |
|
208 | 208 | res += '[^/]*' |
|
209 | 209 | elif c == '?': |
|
210 | 210 | res += '.' |
|
211 | 211 | elif c == '[': |
|
212 | 212 | j = i |
|
213 | 213 | if j < n and pat[j] in '!]': |
|
214 | 214 | j += 1 |
|
215 | 215 | while j < n and pat[j] != ']': |
|
216 | 216 | j += 1 |
|
217 | 217 | if j >= n: |
|
218 | 218 | res += '\\[' |
|
219 | 219 | else: |
|
220 | 220 | stuff = pat[i:j].replace('\\','\\\\') |
|
221 | 221 | i = j + 1 |
|
222 | 222 | if stuff[0] == '!': |
|
223 | 223 | stuff = '^' + stuff[1:] |
|
224 | 224 | elif stuff[0] == '^': |
|
225 | 225 | stuff = '\\' + stuff |
|
226 | 226 | res = '%s[%s]' % (res, stuff) |
|
227 | 227 | elif c == '{': |
|
228 | 228 | group += 1 |
|
229 | 229 | res += '(?:' |
|
230 | 230 | elif c == '}' and group: |
|
231 | 231 | res += ')' |
|
232 | 232 | group -= 1 |
|
233 | 233 | elif c == ',' and group: |
|
234 | 234 | res += '|' |
|
235 | 235 | elif c == '\\': |
|
236 | 236 | p = peek() |
|
237 | 237 | if p: |
|
238 | 238 | i += 1 |
|
239 | 239 | res += escape(p) |
|
240 | 240 | else: |
|
241 | 241 | res += escape(c) |
|
242 | 242 | else: |
|
243 | 243 | res += escape(c) |
|
244 | 244 | return res |
|
245 | 245 | |
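
A few sample translations (shown as regexp source):

    _globre('*.py')       # -> r'[^/]*\.py'     * stops at slashes
    _globre('**/*.c')     # -> r'.*\/[^/]*\.c'  ** crosses directories
    _globre('a?c')        # -> 'a.c'
    _globre('{foo,bar}')  # -> '(?:foo|bar)'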
|
246 | 246 | def _regex(kind, name, tail): |
|
247 | 247 | '''convert a pattern into a regular expression''' |
|
248 | 248 | if not name: |
|
249 | 249 | return '' |
|
250 | 250 | if kind == 're': |
|
251 | 251 | return name |
|
252 | 252 | elif kind == 'path': |
|
253 | 253 | return '^' + re.escape(name) + '(?:/|$)' |
|
254 | 254 | elif kind == 'relglob': |
|
255 | 255 | return '(?:|.*/)' + _globre(name) + tail |
|
256 | 256 | elif kind == 'relpath': |
|
257 | 257 | return re.escape(name) + '(?:/|$)' |
|
258 | 258 | elif kind == 'relre': |
|
259 | 259 | if name.startswith('^'): |
|
260 | 260 | return name |
|
261 | 261 | return '.*' + name |
|
262 | 262 | return _globre(name) + tail |
|
263 | 263 | |
|
264 | 264 | def _buildmatch(ctx, pats, tail): |
|
265 | 265 | fset, pats = _expandsets(pats, ctx) |
|
266 | 266 | if not pats: |
|
267 | 267 | return "", fset.__contains__ |
|
268 | 268 | |
|
269 | 269 | pat, mf = _buildregexmatch(pats, tail) |
|
270 | 270 | if fset: |
|
271 | 271 | return pat, lambda f: f in fset or mf(f) |
|
272 | 272 | return pat, mf |
|
273 | 273 | |
|
274 | 274 | def _buildregexmatch(pats, tail): |
|
275 | 275 | """build a matching function from a set of patterns""" |
|
276 | 276 | try: |
|
277 | 277 | pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats]) |
|
278 | 278 | if len(pat) > 20000: |
|
279 |     | raise OverflowError()

    | 279 | raise OverflowError
|
280 | 280 | return pat, re.compile(pat).match |
|
281 | 281 | except OverflowError: |
|
282 | 282 | # We're using a Python with a tiny regex engine and we |
|
283 | 283 | # made it explode, so we'll divide the pattern list in two |
|
284 | 284 | # until it works |
|
285 | 285 | l = len(pats) |
|
286 | 286 | if l < 2: |
|
287 | 287 | raise |
|
288 | 288 | pata, a = _buildregexmatch(pats[:l//2], tail) |
|
289 | 289 | patb, b = _buildregexmatch(pats[l//2:], tail) |
|
290 | 290 | return pat, lambda s: a(s) or b(s) |
|
291 | 291 | except re.error: |
|
292 | 292 | for k, p in pats: |
|
293 | 293 | try: |
|
294 | 294 | re.compile('(?:%s)' % _regex(k, p, tail)) |
|
295 | 295 | except re.error: |
|
296 | 296 | raise util.Abort(_("invalid pattern (%s): %s") % (k, p)) |
|
297 | 297 | raise util.Abort(_("invalid pattern")) |
|
298 | 298 | |
|
299 | 299 | def _normalize(names, default, root, cwd, auditor): |
|
300 | 300 | pats = [] |
|
301 | 301 | for kind, name in [_patsplit(p, default) for p in names]: |
|
302 | 302 | if kind in ('glob', 'relpath'): |
|
303 | 303 | name = scmutil.canonpath(root, cwd, name, auditor) |
|
304 | 304 | elif kind in ('relglob', 'path'): |
|
305 | 305 | name = util.normpath(name) |
|
306 | 306 | elif kind in ('listfile', 'listfile0'): |
|
307 | 307 | try: |
|
308 | 308 | files = util.readfile(name) |
|
309 | 309 | if kind == 'listfile0': |
|
310 | 310 | files = files.split('\0') |
|
311 | 311 | else: |
|
312 | 312 | files = files.splitlines() |
|
313 | 313 | files = [f for f in files if f] |
|
314 | 314 | except EnvironmentError: |
|
315 | 315 | raise util.Abort(_("unable to read file list (%s)") % name) |
|
316 | 316 | pats += _normalize(files, default, root, cwd, auditor) |
|
317 | 317 | continue |
|
318 | 318 | |
|
319 | 319 | pats.append((kind, name)) |
|
320 | 320 | return pats |
|
321 | 321 | |
|
322 | 322 | def _roots(patterns): |
|
323 | 323 | r = [] |
|
324 | 324 | for kind, name in patterns: |
|
325 | 325 | if kind == 'glob': # find the non-glob prefix |
|
326 | 326 | root = [] |
|
327 | 327 | for p in name.split('/'): |
|
328 | 328 | if '[' in p or '{' in p or '*' in p or '?' in p: |
|
329 | 329 | break |
|
330 | 330 | root.append(p) |
|
331 | 331 | r.append('/'.join(root) or '.') |
|
332 | 332 | elif kind in ('relpath', 'path'): |
|
333 | 333 | r.append(name or '.') |
|
334 | 334 | elif kind == 'relglob': |
|
335 | 335 | r.append('.') |
|
336 | 336 | return r |
|
337 | 337 | |
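
_roots extracts the directories a walker must visit: the non-glob prefix
for globs, the path itself for path kinds, and '.' when anything may match:

    _roots([('glob', 'src/*.c'), ('path', 'docs'), ('relglob', '*.txt')])
    # -> ['src', 'docs', '.']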
|
338 | 338 | def _anypats(patterns): |
|
339 | 339 | for kind, name in patterns: |
|
340 | 340 | if kind in ('glob', 're', 'relglob', 'relre', 'set'): |
|
341 | 341 | return True |
@@ -1,1890 +1,1890 | |||
|
1 | 1 | # patch.py - patch file parsing routines |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Brendan Cully <brendan@kublai.com> |
|
4 | 4 | # Copyright 2007 Chris Mason <chris.mason@oracle.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | import cStringIO, email.Parser, os, errno, re |
|
10 | 10 | import tempfile, zlib, shutil |
|
11 | 11 | |
|
12 | 12 | from i18n import _ |
|
13 | 13 | from node import hex, nullid, short |
|
14 | 14 | import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error |
|
15 | 15 | import context |
|
16 | 16 | |
|
17 | 17 | gitre = re.compile('diff --git a/(.*) b/(.*)') |
|
18 | 18 | |
|
19 | 19 | class PatchError(Exception): |
|
20 | 20 | pass |
|
21 | 21 | |
|
22 | 22 | |
|
23 | 23 | # public functions |
|
24 | 24 | |
|
25 | 25 | def split(stream): |
|
26 | 26 | '''return an iterator of individual patches from a stream''' |
|
27 | 27 | def isheader(line, inheader): |
|
28 | 28 | if inheader and line[0] in (' ', '\t'): |
|
29 | 29 | # continuation |
|
30 | 30 | return True |
|
31 | 31 | if line[0] in (' ', '-', '+'): |
|
32 | 32 | # diff line - don't check for header pattern in there |
|
33 | 33 | return False |
|
34 | 34 | l = line.split(': ', 1) |
|
35 | 35 | return len(l) == 2 and ' ' not in l[0] |
|
36 | 36 | |
|
37 | 37 | def chunk(lines): |
|
38 | 38 | return cStringIO.StringIO(''.join(lines)) |
|
39 | 39 | |
|
40 | 40 | def hgsplit(stream, cur): |
|
41 | 41 | inheader = True |
|
42 | 42 | |
|
43 | 43 | for line in stream: |
|
44 | 44 | if not line.strip(): |
|
45 | 45 | inheader = False |
|
46 | 46 | if not inheader and line.startswith('# HG changeset patch'): |
|
47 | 47 | yield chunk(cur) |
|
48 | 48 | cur = [] |
|
49 | 49 | inheader = True |
|
50 | 50 | |
|
51 | 51 | cur.append(line) |
|
52 | 52 | |
|
53 | 53 | if cur: |
|
54 | 54 | yield chunk(cur) |
|
55 | 55 | |
|
56 | 56 | def mboxsplit(stream, cur): |
|
57 | 57 | for line in stream: |
|
58 | 58 | if line.startswith('From '): |
|
59 | 59 | for c in split(chunk(cur[1:])): |
|
60 | 60 | yield c |
|
61 | 61 | cur = [] |
|
62 | 62 | |
|
63 | 63 | cur.append(line) |
|
64 | 64 | |
|
65 | 65 | if cur: |
|
66 | 66 | for c in split(chunk(cur[1:])): |
|
67 | 67 | yield c |
|
68 | 68 | |
|
69 | 69 | def mimesplit(stream, cur): |
|
70 | 70 | def msgfp(m): |
|
71 | 71 | fp = cStringIO.StringIO() |
|
72 | 72 | g = email.Generator.Generator(fp, mangle_from_=False) |
|
73 | 73 | g.flatten(m) |
|
74 | 74 | fp.seek(0) |
|
75 | 75 | return fp |
|
76 | 76 | |
|
77 | 77 | for line in stream: |
|
78 | 78 | cur.append(line) |
|
79 | 79 | c = chunk(cur) |
|
80 | 80 | |
|
81 | 81 | m = email.Parser.Parser().parse(c) |
|
82 | 82 | if not m.is_multipart(): |
|
83 | 83 | yield msgfp(m) |
|
84 | 84 | else: |
|
85 | 85 | ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') |
|
86 | 86 | for part in m.walk(): |
|
87 | 87 | ct = part.get_content_type() |
|
88 | 88 | if ct not in ok_types: |
|
89 | 89 | continue |
|
90 | 90 | yield msgfp(part) |
|
91 | 91 | |
|
92 | 92 | def headersplit(stream, cur): |
|
93 | 93 | inheader = False |
|
94 | 94 | |
|
95 | 95 | for line in stream: |
|
96 | 96 | if not inheader and isheader(line, inheader): |
|
97 | 97 | yield chunk(cur) |
|
98 | 98 | cur = [] |
|
99 | 99 | inheader = True |
|
100 | 100 | if inheader and not isheader(line, inheader): |
|
101 | 101 | inheader = False |
|
102 | 102 | |
|
103 | 103 | cur.append(line) |
|
104 | 104 | |
|
105 | 105 | if cur: |
|
106 | 106 | yield chunk(cur) |
|
107 | 107 | |
|
108 | 108 | def remainder(cur): |
|
109 | 109 | yield chunk(cur) |
|
110 | 110 | |
|
111 | 111 | class fiter(object): |
|
112 | 112 | def __init__(self, fp): |
|
113 | 113 | self.fp = fp |
|
114 | 114 | |
|
115 | 115 | def __iter__(self): |
|
116 | 116 | return self |
|
117 | 117 | |
|
118 | 118 | def next(self): |
|
119 | 119 | l = self.fp.readline() |
|
120 | 120 | if not l: |
|
121 | 121 | raise StopIteration |
|
122 | 122 | return l |
|
123 | 123 | |
|
124 | 124 | inheader = False |
|
125 | 125 | cur = [] |
|
126 | 126 | |
|
127 | 127 | mimeheaders = ['content-type'] |
|
128 | 128 | |
|
129 | 129 | if not util.safehasattr(stream, 'next'): |
|
130 | 130 | # http responses, for example, have readline but not next |
|
131 | 131 | stream = fiter(stream) |
|
132 | 132 | |
|
133 | 133 | for line in stream: |
|
134 | 134 | cur.append(line) |
|
135 | 135 | if line.startswith('# HG changeset patch'): |
|
136 | 136 | return hgsplit(stream, cur) |
|
137 | 137 | elif line.startswith('From '): |
|
138 | 138 | return mboxsplit(stream, cur) |
|
139 | 139 | elif isheader(line, inheader): |
|
140 | 140 | inheader = True |
|
141 | 141 | if line.split(':', 1)[0].lower() in mimeheaders: |
|
142 | 142 | # let email parser handle this |
|
143 | 143 | return mimesplit(stream, cur) |
|
144 | 144 | elif line.startswith('--- ') and inheader: |
|
145 | 145 | # No evil headers seen by diff start, split by hand |
|
146 | 146 | return headersplit(stream, cur) |
|
147 | 147 | # Not enough info, keep reading |
|
148 | 148 | |
|
149 | 149 | # if we are here, we have a very plain patch |
|
150 | 150 | return remainder(cur) |
|
151 | 151 | |
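
A hedged usage sketch; the filename and handler are placeholders:

    fp = open('incoming.mbox')
    for piece in split(fp):
        # piece is a StringIO holding exactly one patch; hg-export, mbox,
        # MIME and bare-diff inputs are all sniffed apart above
        handle_patch(piece)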
|
152 | 152 | def extract(ui, fileobj): |
|
153 | 153 | '''extract patch from data read from fileobj. |
|
154 | 154 | |
|
155 | 155 | patch can be a normal patch or contained in an email message. |
|
156 | 156 | |
|
157 | 157 | return tuple (filename, message, user, date, branch, node, p1, p2). |
|
158 | 158 | Any item in the returned tuple can be None. If filename is None, |
|
159 | 159 | fileobj did not contain a patch. Caller must unlink filename when done.''' |
|
160 | 160 | |
|
161 | 161 | # attempt to detect the start of a patch |
|
162 | 162 | # (this heuristic is borrowed from quilt) |
|
163 | 163 | diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |' |
|
164 | 164 | r'retrieving revision [0-9]+(\.[0-9]+)*$|' |
|
165 | 165 | r'---[ \t].*?^\+\+\+[ \t]|' |
|
166 | 166 | r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL) |
|
167 | 167 | |
|
168 | 168 | fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') |
|
169 | 169 | tmpfp = os.fdopen(fd, 'w') |
|
170 | 170 | try: |
|
171 | 171 | msg = email.Parser.Parser().parse(fileobj) |
|
172 | 172 | |
|
173 | 173 | subject = msg['Subject'] |
|
174 | 174 | user = msg['From'] |
|
175 | 175 | if not subject and not user: |
|
176 | 176 | # Not an email, restore parsed headers if any |
|
177 | 177 | subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n' |
|
178 | 178 | |
|
179 | 179 | gitsendmail = 'git-send-email' in msg.get('X-Mailer', '') |
|
180 | 180 | # should try to parse msg['Date'] |
|
181 | 181 | date = None |
|
182 | 182 | nodeid = None |
|
183 | 183 | branch = None |
|
184 | 184 | parents = [] |
|
185 | 185 | |
|
186 | 186 | if subject: |
|
187 | 187 | if subject.startswith('[PATCH'): |
|
188 | 188 | pend = subject.find(']') |
|
189 | 189 | if pend >= 0: |
|
190 | 190 | subject = subject[pend + 1:].lstrip() |
|
191 | 191 | subject = re.sub(r'\n[ \t]+', ' ', subject) |
|
192 | 192 | ui.debug('Subject: %s\n' % subject) |
|
193 | 193 | if user: |
|
194 | 194 | ui.debug('From: %s\n' % user) |
|
195 | 195 | diffs_seen = 0 |
|
196 | 196 | ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') |
|
197 | 197 | message = '' |
|
198 | 198 | for part in msg.walk(): |
|
199 | 199 | content_type = part.get_content_type() |
|
200 | 200 | ui.debug('Content-Type: %s\n' % content_type) |
|
201 | 201 | if content_type not in ok_types: |
|
202 | 202 | continue |
|
203 | 203 | payload = part.get_payload(decode=True) |
|
204 | 204 | m = diffre.search(payload) |
|
205 | 205 | if m: |
|
206 | 206 | hgpatch = False |
|
207 | 207 | hgpatchheader = False |
|
208 | 208 | ignoretext = False |
|
209 | 209 | |
|
210 | 210 | ui.debug('found patch at byte %d\n' % m.start(0)) |
|
211 | 211 | diffs_seen += 1 |
|
212 | 212 | cfp = cStringIO.StringIO() |
|
213 | 213 | for line in payload[:m.start(0)].splitlines(): |
|
214 | 214 | if line.startswith('# HG changeset patch') and not hgpatch: |
|
215 | 215 | ui.debug('patch generated by hg export\n') |
|
216 | 216 | hgpatch = True |
|
217 | 217 | hgpatchheader = True |
|
218 | 218 | # drop earlier commit message content |
|
219 | 219 | cfp.seek(0) |
|
220 | 220 | cfp.truncate() |
|
221 | 221 | subject = None |
|
222 | 222 | elif hgpatchheader: |
|
223 | 223 | if line.startswith('# User '): |
|
224 | 224 | user = line[7:] |
|
225 | 225 | ui.debug('From: %s\n' % user) |
|
226 | 226 | elif line.startswith("# Date "): |
|
227 | 227 | date = line[7:] |
|
228 | 228 | elif line.startswith("# Branch "): |
|
229 | 229 | branch = line[9:] |
|
230 | 230 | elif line.startswith("# Node ID "): |
|
231 | 231 | nodeid = line[10:] |
|
232 | 232 | elif line.startswith("# Parent "): |
|
233 | 233 | parents.append(line[9:].lstrip()) |
|
234 | 234 | elif not line.startswith("# "): |
|
235 | 235 | hgpatchheader = False |
|
236 | 236 | elif line == '---' and gitsendmail: |
|
237 | 237 | ignoretext = True |
|
238 | 238 | if not hgpatchheader and not ignoretext: |
|
239 | 239 | cfp.write(line) |
|
240 | 240 | cfp.write('\n') |
|
241 | 241 | message = cfp.getvalue() |
|
242 | 242 | if tmpfp: |
|
243 | 243 | tmpfp.write(payload) |
|
244 | 244 | if not payload.endswith('\n'): |
|
245 | 245 | tmpfp.write('\n') |
|
246 | 246 | elif not diffs_seen and message and content_type == 'text/plain': |
|
247 | 247 | message += '\n' + payload |
|
248 | 248 | except: |
|
249 | 249 | tmpfp.close() |
|
250 | 250 | os.unlink(tmpname) |
|
251 | 251 | raise |
|
252 | 252 | |
|
253 | 253 | if subject and not message.startswith(subject): |
|
254 | 254 | message = '%s\n%s' % (subject, message) |
|
255 | 255 | tmpfp.close() |
|
256 | 256 | if not diffs_seen: |
|
257 | 257 | os.unlink(tmpname) |
|
258 | 258 | return None, message, user, date, branch, None, None, None |
|
259 | 259 | p1 = parents and parents.pop(0) or None |
|
260 | 260 | p2 = parents and parents.pop(0) or None |
|
261 | 261 | return tmpname, message, user, date, branch, nodeid, p1, p2 |
|
262 | 262 | |
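
Per the docstring, the caller owns the temporary file. A hedged sketch of
the calling convention (ui and fileobj are placeholders):

    tmpname, message, user, date, branch, node, p1, p2 = extract(ui, fileobj)
    try:
        if tmpname is None:
            raise util.Abort(_('no diffs found'))
        # ... apply the patch stored at tmpname ...
    finally:
        if tmpname:
            os.unlink(tmpname)   # caller must unlink when done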
|
263 | 263 | class patchmeta(object): |
|
264 | 264 | """Patched file metadata |
|
265 | 265 | |
|
266 | 266 | 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY |
|
267 | 267 | or COPY. 'path' is patched file path. 'oldpath' is set to the |
|
268 | 268 | origin file when 'op' is either COPY or RENAME, None otherwise. If |
|
269 | 269 | file mode is changed, 'mode' is a tuple (islink, isexec) where |
|
270 | 270 | 'islink' is True if the file is a symlink and 'isexec' is True if |
|
271 | 271 | the file is executable. Otherwise, 'mode' is None. |
|
272 | 272 | """ |
|
273 | 273 | def __init__(self, path): |
|
274 | 274 | self.path = path |
|
275 | 275 | self.oldpath = None |
|
276 | 276 | self.mode = None |
|
277 | 277 | self.op = 'MODIFY' |
|
278 | 278 | self.binary = False |
|
279 | 279 | |
|
280 | 280 | def setmode(self, mode): |
|
281 | 281 | islink = mode & 020000 |
|
282 | 282 | isexec = mode & 0100 |
|
283 | 283 | self.mode = (islink, isexec) |
|
284 | 284 | |
|
285 | 285 | def copy(self): |
|
286 | 286 | other = patchmeta(self.path) |
|
287 | 287 | other.oldpath = self.oldpath |
|
288 | 288 | other.mode = self.mode |
|
289 | 289 | other.op = self.op |
|
290 | 290 | other.binary = self.binary |
|
291 | 291 | return other |
|
292 | 292 | |
|
293 | 293 | def _ispatchinga(self, afile): |
|
294 | 294 | if afile == '/dev/null': |
|
295 | 295 | return self.op == 'ADD' |
|
296 | 296 | return afile == 'a/' + (self.oldpath or self.path) |
|
297 | 297 | |
|
298 | 298 | def _ispatchingb(self, bfile): |
|
299 | 299 | if bfile == '/dev/null': |
|
300 | 300 | return self.op == 'DELETE' |
|
301 | 301 | return bfile == 'b/' + self.path |
|
302 | 302 | |
|
303 | 303 | def ispatching(self, afile, bfile): |
|
304 | 304 | return self._ispatchinga(afile) and self._ispatchingb(bfile) |
|
305 | 305 | |
|
306 | 306 | def __repr__(self): |
|
307 | 307 | return "<patchmeta %s %r>" % (self.op, self.path) |
|
308 | 308 | |
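
For example, a rename parsed from a git-style diff matches only the right
a/ and b/ file pair:

    gp = patchmeta('b.txt')
    gp.op, gp.oldpath = 'RENAME', 'a.txt'
    gp.ispatching('a/a.txt', 'b/b.txt')      # True
    gp.ispatching('/dev/null', 'b/b.txt')    # False: /dev/null only fits ADD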
|
309 | 309 | def readgitpatch(lr): |
|
310 | 310 | """extract git-style metadata about patches from <patchname>""" |
|
311 | 311 | |
|
312 | 312 | # Filter patch for git information |
|
313 | 313 | gp = None |
|
314 | 314 | gitpatches = [] |
|
315 | 315 | for line in lr: |
|
316 | 316 | line = line.rstrip(' \r\n') |
|
317 | 317 | if line.startswith('diff --git'): |
|
318 | 318 | m = gitre.match(line) |
|
319 | 319 | if m: |
|
320 | 320 | if gp: |
|
321 | 321 | gitpatches.append(gp) |
|
322 | 322 | dst = m.group(2) |
|
323 | 323 | gp = patchmeta(dst) |
|
324 | 324 | elif gp: |
|
325 | 325 | if line.startswith('--- '): |
|
326 | 326 | gitpatches.append(gp) |
|
327 | 327 | gp = None |
|
328 | 328 | continue |
|
329 | 329 | if line.startswith('rename from '): |
|
330 | 330 | gp.op = 'RENAME' |
|
331 | 331 | gp.oldpath = line[12:] |
|
332 | 332 | elif line.startswith('rename to '): |
|
333 | 333 | gp.path = line[10:] |
|
334 | 334 | elif line.startswith('copy from '): |
|
335 | 335 | gp.op = 'COPY' |
|
336 | 336 | gp.oldpath = line[10:] |
|
337 | 337 | elif line.startswith('copy to '): |
|
338 | 338 | gp.path = line[8:] |
|
339 | 339 | elif line.startswith('deleted file'): |
|
340 | 340 | gp.op = 'DELETE' |
|
341 | 341 | elif line.startswith('new file mode '): |
|
342 | 342 | gp.op = 'ADD' |
|
343 | 343 | gp.setmode(int(line[-6:], 8)) |
|
344 | 344 | elif line.startswith('new mode '): |
|
345 | 345 | gp.setmode(int(line[-6:], 8)) |
|
346 | 346 | elif line.startswith('GIT binary patch'): |
|
347 | 347 | gp.binary = True |
|
348 | 348 | if gp: |
|
349 | 349 | gitpatches.append(gp) |
|
350 | 350 | |
|
351 | 351 | return gitpatches |
|
352 | 352 | |
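A minimal sketch of readgitpatch: it accepts any iterable of lines, so a plain list works for illustration; 'gitre' is the module-level regex defined earlier in this file:

    lines = ['diff --git a/old.txt b/new.txt\n',
             'rename from old.txt\n',
             'rename to new.txt\n']
    gps = readgitpatch(lines)
    # gps == [<patchmeta RENAME 'new.txt'>], with gps[0].oldpath == 'old.txt'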
|
353 | 353 | class linereader(object): |
|
354 | 354 | # simple class to allow pushing lines back into the input stream |
|
355 | 355 | def __init__(self, fp): |
|
356 | 356 | self.fp = fp |
|
357 | 357 | self.buf = [] |
|
358 | 358 | |
|
359 | 359 | def push(self, line): |
|
360 | 360 | if line is not None: |
|
361 | 361 | self.buf.append(line) |
|
362 | 362 | |
|
363 | 363 | def readline(self): |
|
364 | 364 | if self.buf: |
|
365 | 365 | l = self.buf[0] |
|
366 | 366 | del self.buf[0] |
|
367 | 367 | return l |
|
368 | 368 | return self.fp.readline() |
|
369 | 369 | |
|
370 | 370 | def __iter__(self): |
|
371 | 371 | while True: |
|
372 | 372 | l = self.readline() |
|
373 | 373 | if not l: |
|
374 | 374 | break |
|
375 | 375 | yield l |
|
376 | 376 | |
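A quick sketch of the push-back behavior, using a StringIO as the underlying file object:

    import cStringIO
    lr = linereader(cStringIO.StringIO('one\ntwo\n'))
    l = lr.readline()            # 'one\n'
    lr.push(l)                   # put it back
    assert lr.readline() == 'one\n'
    assert lr.readline() == 'two\n'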
|
377 | 377 | class abstractbackend(object): |
|
378 | 378 | def __init__(self, ui): |
|
379 | 379 | self.ui = ui |
|
380 | 380 | |
|
381 | 381 | def getfile(self, fname): |
|
382 | 382 | """Return target file data and flags as a (data, (islink, |
|
383 | 383 | isexec)) tuple. |
|
384 | 384 | """ |
|
385 | 385 | raise NotImplementedError |
|
386 | 386 | |
|
387 | 387 | def setfile(self, fname, data, mode, copysource): |
|
388 | 388 | """Write data to target file fname and set its mode. mode is a |
|
389 | 389 | (islink, isexec) tuple. If data is None, the file content should |
|
390 | 390 | be left unchanged. If the file is modified after being copied, |
|
391 | 391 | copysource is set to the original file name. |
|
392 | 392 | """ |
|
393 | 393 | raise NotImplementedError |
|
394 | 394 | |
|
395 | 395 | def unlink(self, fname): |
|
396 | 396 | """Unlink target file.""" |
|
397 | 397 | raise NotImplementedError |
|
398 | 398 | |
|
399 | 399 | def writerej(self, fname, failed, total, lines): |
|
400 | 400 | """Write rejected lines for fname. total is the number of hunks |
|
401 | 401 | which failed to apply and total the total number of hunks for this |
|
402 | 402 | files. |
|
403 | 403 | """ |
|
404 | 404 | pass |
|
405 | 405 | |
|
406 | 406 | def exists(self, fname): |
|
407 | 407 | raise NotImplementedError |
|
408 | 408 | |
|
409 | 409 | class fsbackend(abstractbackend): |
|
410 | 410 | def __init__(self, ui, basedir): |
|
411 | 411 | super(fsbackend, self).__init__(ui) |
|
412 | 412 | self.opener = scmutil.opener(basedir) |
|
413 | 413 | |
|
414 | 414 | def _join(self, f): |
|
415 | 415 | return os.path.join(self.opener.base, f) |
|
416 | 416 | |
|
417 | 417 | def getfile(self, fname): |
|
418 | 418 | path = self._join(fname) |
|
419 | 419 | if os.path.islink(path): |
|
420 | 420 | return (os.readlink(path), (True, False)) |
|
421 | 421 | isexec = False |
|
422 | 422 | try: |
|
423 | 423 | isexec = os.lstat(path).st_mode & 0100 != 0 |
|
424 | 424 | except OSError, e: |
|
425 | 425 | if e.errno != errno.ENOENT: |
|
426 | 426 | raise |
|
427 | 427 | return (self.opener.read(fname), (False, isexec)) |
|
428 | 428 | |
|
429 | 429 | def setfile(self, fname, data, mode, copysource): |
|
430 | 430 | islink, isexec = mode |
|
431 | 431 | if data is None: |
|
432 | 432 | util.setflags(self._join(fname), islink, isexec) |
|
433 | 433 | return |
|
434 | 434 | if islink: |
|
435 | 435 | self.opener.symlink(data, fname) |
|
436 | 436 | else: |
|
437 | 437 | self.opener.write(fname, data) |
|
438 | 438 | if isexec: |
|
439 | 439 | util.setflags(self._join(fname), False, True) |
|
440 | 440 | |
|
441 | 441 | def unlink(self, fname): |
|
442 | 442 | try: |
|
443 | 443 | util.unlinkpath(self._join(fname)) |
|
444 | 444 | except OSError, inst: |
|
445 | 445 | if inst.errno != errno.ENOENT: |
|
446 | 446 | raise |
|
447 | 447 | |
|
448 | 448 | def writerej(self, fname, failed, total, lines): |
|
449 | 449 | fname = fname + ".rej" |
|
450 | 450 | self.ui.warn( |
|
451 | 451 | _("%d out of %d hunks FAILED -- saving rejects to file %s\n") % |
|
452 | 452 | (failed, total, fname)) |
|
453 | 453 | fp = self.opener(fname, 'w') |
|
454 | 454 | fp.writelines(lines) |
|
455 | 455 | fp.close() |
|
456 | 456 | |
|
457 | 457 | def exists(self, fname): |
|
458 | 458 | return os.path.lexists(self._join(fname)) |
|
459 | 459 | |
|
460 | 460 | class workingbackend(fsbackend): |
|
461 | 461 | def __init__(self, ui, repo, similarity): |
|
462 | 462 | super(workingbackend, self).__init__(ui, repo.root) |
|
463 | 463 | self.repo = repo |
|
464 | 464 | self.similarity = similarity |
|
465 | 465 | self.removed = set() |
|
466 | 466 | self.changed = set() |
|
467 | 467 | self.copied = [] |
|
468 | 468 | |
|
469 | 469 | def _checkknown(self, fname): |
|
470 | 470 | if self.repo.dirstate[fname] == '?' and self.exists(fname): |
|
471 | 471 | raise PatchError(_('cannot patch %s: file is not tracked') % fname) |
|
472 | 472 | |
|
473 | 473 | def setfile(self, fname, data, mode, copysource): |
|
474 | 474 | self._checkknown(fname) |
|
475 | 475 | super(workingbackend, self).setfile(fname, data, mode, copysource) |
|
476 | 476 | if copysource is not None: |
|
477 | 477 | self.copied.append((copysource, fname)) |
|
478 | 478 | self.changed.add(fname) |
|
479 | 479 | |
|
480 | 480 | def unlink(self, fname): |
|
481 | 481 | self._checkknown(fname) |
|
482 | 482 | super(workingbackend, self).unlink(fname) |
|
483 | 483 | self.removed.add(fname) |
|
484 | 484 | self.changed.add(fname) |
|
485 | 485 | |
|
486 | 486 | def close(self): |
|
487 | 487 | wctx = self.repo[None] |
|
488 | 488 | addremoved = set(self.changed) |
|
489 | 489 | for src, dst in self.copied: |
|
490 | 490 | scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst) |
|
491 | 491 | if self.removed: |
|
492 | 492 | wctx.forget(sorted(self.removed)) |
|
493 | 493 | for f in self.removed: |
|
494 | 494 | if f not in self.repo.dirstate: |
|
495 | 495 | # File was deleted and no longer belongs to the |
|
496 | 496 | # dirstate, it was probably marked added then |
|
497 | 497 | # deleted, and should not be considered by |
|
498 | 498 | # addremove(). |
|
499 | 499 | addremoved.discard(f) |
|
500 | 500 | if addremoved: |
|
501 | 501 | cwd = self.repo.getcwd() |
|
502 | 502 | if cwd: |
|
503 | 503 | addremoved = [util.pathto(self.repo.root, cwd, f) |
|
504 | 504 | for f in addremoved] |
|
505 | 505 | scmutil.addremove(self.repo, addremoved, similarity=self.similarity) |
|
506 | 506 | return sorted(self.changed) |
|
507 | 507 | |
|
508 | 508 | class filestore(object): |
|
509 | 509 | def __init__(self, maxsize=None): |
|
510 | 510 | self.opener = None |
|
511 | 511 | self.files = {} |
|
512 | 512 | self.created = 0 |
|
513 | 513 | self.maxsize = maxsize |
|
514 | 514 | if self.maxsize is None: |
|
515 | 515 | self.maxsize = 4*(2**20) |
|
516 | 516 | self.size = 0 |
|
517 | 517 | self.data = {} |
|
518 | 518 | |
|
519 | 519 | def setfile(self, fname, data, mode, copied=None): |
|
520 | 520 | if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize: |
|
521 | 521 | self.data[fname] = (data, mode, copied) |
|
522 | 522 | self.size += len(data) |
|
523 | 523 | else: |
|
524 | 524 | if self.opener is None: |
|
525 | 525 | root = tempfile.mkdtemp(prefix='hg-patch-') |
|
526 | 526 | self.opener = scmutil.opener(root) |
|
527 | 527 | # Avoid filename issues with these simple names |
|
528 | 528 | fn = str(self.created) |
|
529 | 529 | self.opener.write(fn, data) |
|
530 | 530 | self.created += 1 |
|
531 | 531 | self.files[fname] = (fn, mode, copied) |
|
532 | 532 | |
|
533 | 533 | def getfile(self, fname): |
|
534 | 534 | if fname in self.data: |
|
535 | 535 | return self.data[fname] |
|
536 | 536 | if not self.opener or fname not in self.files: |
|
537 |     | raise IOError

    | 537 | raise IOError
|
538 | 538 | fn, mode, copied = self.files[fname] |
|
539 | 539 | return self.opener.read(fn), mode, copied |
|
540 | 540 | |
|
541 | 541 | def close(self): |
|
542 | 542 | if self.opener: |
|
543 | 543 | shutil.rmtree(self.opener.base) |
|
544 | 544 | |
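A sketch of the spill-to-disk behavior: writes are kept in memory until maxsize is exceeded, then go to a temporary directory (assumes this module's tempfile/scmutil imports are available):

    store = filestore(maxsize=16)
    store.setfile('small', 'x' * 8, (False, False))   # kept in self.data
    store.setfile('big', 'y' * 64, (False, False))    # spilled to a temp dir
    data, mode, copied = store.getfile('big')
    assert data == 'y' * 64 and copied is None
    store.close()   # removes the temp dir, if one was created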
|
545 | 545 | class repobackend(abstractbackend): |
|
546 | 546 | def __init__(self, ui, repo, ctx, store): |
|
547 | 547 | super(repobackend, self).__init__(ui) |
|
548 | 548 | self.repo = repo |
|
549 | 549 | self.ctx = ctx |
|
550 | 550 | self.store = store |
|
551 | 551 | self.changed = set() |
|
552 | 552 | self.removed = set() |
|
553 | 553 | self.copied = {} |
|
554 | 554 | |
|
555 | 555 | def _checkknown(self, fname): |
|
556 | 556 | if fname not in self.ctx: |
|
557 | 557 | raise PatchError(_('cannot patch %s: file is not tracked') % fname) |
|
558 | 558 | |
|
559 | 559 | def getfile(self, fname): |
|
560 | 560 | try: |
|
561 | 561 | fctx = self.ctx[fname] |
|
562 | 562 | except error.LookupError: |
|
563 |     | raise IOError

    | 563 | raise IOError
|
564 | 564 | flags = fctx.flags() |
|
565 | 565 | return fctx.data(), ('l' in flags, 'x' in flags) |
|
566 | 566 | |
|
567 | 567 | def setfile(self, fname, data, mode, copysource): |
|
568 | 568 | if copysource: |
|
569 | 569 | self._checkknown(copysource) |
|
570 | 570 | if data is None: |
|
571 | 571 | data = self.ctx[fname].data() |
|
572 | 572 | self.store.setfile(fname, data, mode, copysource) |
|
573 | 573 | self.changed.add(fname) |
|
574 | 574 | if copysource: |
|
575 | 575 | self.copied[fname] = copysource |
|
576 | 576 | |
|
577 | 577 | def unlink(self, fname): |
|
578 | 578 | self._checkknown(fname) |
|
579 | 579 | self.removed.add(fname) |
|
580 | 580 | |
|
581 | 581 | def exists(self, fname): |
|
582 | 582 | return fname in self.ctx |
|
583 | 583 | |
|
584 | 584 | def close(self): |
|
585 | 585 | return self.changed | self.removed |
|
586 | 586 | |
|
587 | 587 | # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 |
|
588 | 588 | unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@') |
|
589 | 589 | contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)') |
|
590 | 590 | eolmodes = ['strict', 'crlf', 'lf', 'auto'] |
|
591 | 591 | |
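A quick illustration of the hunk-header regex above; when a length is omitted it defaults to 1 and the corresponding group is None:

    m = unidesc.match('@@ -1,5 +1,6 @@')
    assert m.groups() == ('1', '5', '1', '6')
    m = unidesc.match('@@ -10 +10 @@')
    assert m.groups() == ('10', None, '10', None)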
|
592 | 592 | class patchfile(object): |
|
593 | 593 | def __init__(self, ui, gp, backend, store, eolmode='strict'): |
|
594 | 594 | self.fname = gp.path |
|
595 | 595 | self.eolmode = eolmode |
|
596 | 596 | self.eol = None |
|
597 | 597 | self.backend = backend |
|
598 | 598 | self.ui = ui |
|
599 | 599 | self.lines = [] |
|
600 | 600 | self.exists = False |
|
601 | 601 | self.missing = True |
|
602 | 602 | self.mode = gp.mode |
|
603 | 603 | self.copysource = gp.oldpath |
|
604 | 604 | self.create = gp.op in ('ADD', 'COPY', 'RENAME') |
|
605 | 605 | self.remove = gp.op == 'DELETE' |
|
606 | 606 | try: |
|
607 | 607 | if self.copysource is None: |
|
608 | 608 | data, mode = backend.getfile(self.fname) |
|
609 | 609 | self.exists = True |
|
610 | 610 | else: |
|
611 | 611 | data, mode = store.getfile(self.copysource)[:2] |
|
612 | 612 | self.exists = backend.exists(self.fname) |
|
613 | 613 | self.missing = False |
|
614 | 614 | if data: |
|
615 | 615 | self.lines = mdiff.splitnewlines(data) |
|
616 | 616 | if self.mode is None: |
|
617 | 617 | self.mode = mode |
|
618 | 618 | if self.lines: |
|
619 | 619 | # Normalize line endings |
|
620 | 620 | if self.lines[0].endswith('\r\n'): |
|
621 | 621 | self.eol = '\r\n' |
|
622 | 622 | elif self.lines[0].endswith('\n'): |
|
623 | 623 | self.eol = '\n' |
|
624 | 624 | if eolmode != 'strict': |
|
625 | 625 | nlines = [] |
|
626 | 626 | for l in self.lines: |
|
627 | 627 | if l.endswith('\r\n'): |
|
628 | 628 | l = l[:-2] + '\n' |
|
629 | 629 | nlines.append(l) |
|
630 | 630 | self.lines = nlines |
|
631 | 631 | except IOError: |
|
632 | 632 | if self.create: |
|
633 | 633 | self.missing = False |
|
634 | 634 | if self.mode is None: |
|
635 | 635 | self.mode = (False, False) |
|
636 | 636 | if self.missing: |
|
637 | 637 | self.ui.warn(_("unable to find '%s' for patching\n") % self.fname) |
|
638 | 638 | |
|
639 | 639 | self.hash = {} |
|
640 | 640 | self.dirty = 0 |
|
641 | 641 | self.offset = 0 |
|
642 | 642 | self.skew = 0 |
|
643 | 643 | self.rej = [] |
|
644 | 644 | self.fileprinted = False |
|
645 | 645 | self.printfile(False) |
|
646 | 646 | self.hunks = 0 |
|
647 | 647 | |
|
648 | 648 | def writelines(self, fname, lines, mode): |
|
649 | 649 | if self.eolmode == 'auto': |
|
650 | 650 | eol = self.eol |
|
651 | 651 | elif self.eolmode == 'crlf': |
|
652 | 652 | eol = '\r\n' |
|
653 | 653 | else: |
|
654 | 654 | eol = '\n' |
|
655 | 655 | |
|
656 | 656 | if self.eolmode != 'strict' and eol and eol != '\n': |
|
657 | 657 | rawlines = [] |
|
658 | 658 | for l in lines: |
|
659 | 659 | if l and l[-1] == '\n': |
|
660 | 660 | l = l[:-1] + eol |
|
661 | 661 | rawlines.append(l) |
|
662 | 662 | lines = rawlines |
|
663 | 663 | |
|
664 | 664 | self.backend.setfile(fname, ''.join(lines), mode, self.copysource) |
|
665 | 665 | |
|
666 | 666 | def printfile(self, warn): |
|
667 | 667 | if self.fileprinted: |
|
668 | 668 | return |
|
669 | 669 | if warn or self.ui.verbose: |
|
670 | 670 | self.fileprinted = True |
|
671 | 671 | s = _("patching file %s\n") % self.fname |
|
672 | 672 | if warn: |
|
673 | 673 | self.ui.warn(s) |
|
674 | 674 | else: |
|
675 | 675 | self.ui.note(s) |
|
676 | 676 | |
|
677 | 677 | |
|
678 | 678 | def findlines(self, l, linenum): |
|
679 | 679 | # looks through the hash and finds candidate lines. The |
|
680 | 680 | # result is a list of line numbers sorted based on distance |
|
681 | 681 | # from linenum |
|
682 | 682 | |
|
683 | 683 | cand = self.hash.get(l, []) |
|
684 | 684 | if len(cand) > 1: |
|
685 | 685 | # re-sort the candidate lines by their distance from linenum.
|
686 | 686 | cand.sort(key=lambda x: abs(x - linenum)) |
|
687 | 687 | return cand |
|
688 | 688 | |
|
689 | 689 | def write_rej(self): |
|
690 | 690 | # our rejects are a little different from patch(1). This always |
|
691 | 691 | # creates rejects in the same form as the original patch. A file |
|
692 | 692 | # header is inserted so that you can run the reject through patch again |
|
693 | 693 | # without having to type the filename. |
|
694 | 694 | if not self.rej: |
|
695 | 695 | return |
|
696 | 696 | base = os.path.basename(self.fname) |
|
697 | 697 | lines = ["--- %s\n+++ %s\n" % (base, base)] |
|
698 | 698 | for x in self.rej: |
|
699 | 699 | for l in x.hunk: |
|
700 | 700 | lines.append(l) |
|
701 | 701 | if l[-1] != '\n': |
|
702 | 702 | lines.append("\n\ No newline at end of file\n") |
|
703 | 703 | self.backend.writerej(self.fname, len(self.rej), self.hunks, lines) |
|
704 | 704 | |
|
705 | 705 | def apply(self, h): |
|
706 | 706 | if not h.complete(): |
|
707 | 707 | raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") % |
|
708 | 708 | (h.number, h.desc, len(h.a), h.lena, len(h.b), |
|
709 | 709 | h.lenb)) |
|
710 | 710 | |
|
711 | 711 | self.hunks += 1 |
|
712 | 712 | |
|
713 | 713 | if self.missing: |
|
714 | 714 | self.rej.append(h) |
|
715 | 715 | return -1 |
|
716 | 716 | |
|
717 | 717 | if self.exists and self.create: |
|
718 | 718 | if self.copysource: |
|
719 | 719 | self.ui.warn(_("cannot create %s: destination already " |
|
720 | 720 | "exists\n" % self.fname)) |
|
721 | 721 | else: |
|
722 | 722 | self.ui.warn(_("file %s already exists\n") % self.fname) |
|
723 | 723 | self.rej.append(h) |
|
724 | 724 | return -1 |
|
725 | 725 | |
|
726 | 726 | if isinstance(h, binhunk): |
|
727 | 727 | if self.remove: |
|
728 | 728 | self.backend.unlink(self.fname) |
|
729 | 729 | else: |
|
730 | 730 | self.lines[:] = h.new() |
|
731 | 731 | self.offset += len(h.new()) |
|
732 | 732 | self.dirty = True |
|
733 | 733 | return 0 |
|
734 | 734 | |
|
735 | 735 | horig = h |
|
736 | 736 | if (self.eolmode in ('crlf', 'lf') |
|
737 | 737 | or self.eolmode == 'auto' and self.eol): |
|
738 | 738 | # If new eols are going to be normalized, then normalize |
|
739 | 739 | # hunk data before patching. Otherwise, preserve input |
|
740 | 740 | # line-endings. |
|
741 | 741 | h = h.getnormalized() |
|
742 | 742 | |
|
743 | 743 | # fast case first, no offsets, no fuzz |
|
744 | 744 | old, oldstart, new, newstart = h.fuzzit(0, False) |
|
745 | 745 | oldstart += self.offset |
|
746 | 746 | orig_start = oldstart |
|
747 | 747 | # if there's skew we want to emit the "(offset %d lines)" even |
|
748 | 748 | # when the hunk cleanly applies at start + skew, so skip the |
|
749 | 749 | # fast case code |
|
750 | 750 | if (self.skew == 0 and |
|
751 | 751 | diffhelpers.testhunk(old, self.lines, oldstart) == 0): |
|
752 | 752 | if self.remove: |
|
753 | 753 | self.backend.unlink(self.fname) |
|
754 | 754 | else: |
|
755 | 755 | self.lines[oldstart:oldstart + len(old)] = new |
|
756 | 756 | self.offset += len(new) - len(old) |
|
757 | 757 | self.dirty = True |
|
758 | 758 | return 0 |
|
759 | 759 | |
|
760 | 760 | # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
|
761 | 761 | self.hash = {} |
|
762 | 762 | for x, s in enumerate(self.lines): |
|
763 | 763 | self.hash.setdefault(s, []).append(x) |
|
764 | 764 | |
|
765 | 765 | for fuzzlen in xrange(3): |
|
766 | 766 | for toponly in [True, False]: |
|
767 | 767 | old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly) |
|
768 | 768 | oldstart = oldstart + self.offset + self.skew |
|
769 | 769 | oldstart = min(oldstart, len(self.lines)) |
|
770 | 770 | if old: |
|
771 | 771 | cand = self.findlines(old[0][1:], oldstart) |
|
772 | 772 | else: |
|
773 | 773 | # Only adding lines with no or fuzzed context, just |
|
774 | 774 | # take the skew into account
|
775 | 775 | cand = [oldstart] |
|
776 | 776 | |
|
777 | 777 | for l in cand: |
|
778 | 778 | if not old or diffhelpers.testhunk(old, self.lines, l) == 0: |
|
779 | 779 | self.lines[l : l + len(old)] = new |
|
780 | 780 | self.offset += len(new) - len(old) |
|
781 | 781 | self.skew = l - orig_start |
|
782 | 782 | self.dirty = True |
|
783 | 783 | offset = l - orig_start - fuzzlen |
|
784 | 784 | if fuzzlen: |
|
785 | 785 | msg = _("Hunk #%d succeeded at %d " |
|
786 | 786 | "with fuzz %d " |
|
787 | 787 | "(offset %d lines).\n") |
|
788 | 788 | self.printfile(True) |
|
789 | 789 | self.ui.warn(msg % |
|
790 | 790 | (h.number, l + 1, fuzzlen, offset)) |
|
791 | 791 | else: |
|
792 | 792 | msg = _("Hunk #%d succeeded at %d " |
|
793 | 793 | "(offset %d lines).\n") |
|
794 | 794 | self.ui.note(msg % (h.number, l + 1, offset)) |
|
795 | 795 | return fuzzlen |
|
796 | 796 | self.printfile(True) |
|
797 | 797 | self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start)) |
|
798 | 798 | self.rej.append(horig) |
|
799 | 799 | return -1 |
|
800 | 800 | |
|
801 | 801 | def close(self): |
|
802 | 802 | if self.dirty: |
|
803 | 803 | self.writelines(self.fname, self.lines, self.mode) |
|
804 | 804 | self.write_rej() |
|
805 | 805 | return len(self.rej) |
|
806 | 806 | |
|
807 | 807 | class hunk(object): |
|
808 | 808 | def __init__(self, desc, num, lr, context): |
|
809 | 809 | self.number = num |
|
810 | 810 | self.desc = desc |
|
811 | 811 | self.hunk = [desc] |
|
812 | 812 | self.a = [] |
|
813 | 813 | self.b = [] |
|
814 | 814 | self.starta = self.lena = None |
|
815 | 815 | self.startb = self.lenb = None |
|
816 | 816 | if lr is not None: |
|
817 | 817 | if context: |
|
818 | 818 | self.read_context_hunk(lr) |
|
819 | 819 | else: |
|
820 | 820 | self.read_unified_hunk(lr) |
|
821 | 821 | |
|
822 | 822 | def getnormalized(self): |
|
823 | 823 | """Return a copy with line endings normalized to LF.""" |
|
824 | 824 | |
|
825 | 825 | def normalize(lines): |
|
826 | 826 | nlines = [] |
|
827 | 827 | for line in lines: |
|
828 | 828 | if line.endswith('\r\n'): |
|
829 | 829 | line = line[:-2] + '\n' |
|
830 | 830 | nlines.append(line) |
|
831 | 831 | return nlines |
|
832 | 832 | |
|
833 | 833 | # Dummy object, it is rebuilt manually |
|
834 | 834 | nh = hunk(self.desc, self.number, None, None) |
|
835 | 835 | nh.number = self.number |
|
836 | 836 | nh.desc = self.desc |
|
837 | 837 | nh.hunk = self.hunk |
|
838 | 838 | nh.a = normalize(self.a) |
|
839 | 839 | nh.b = normalize(self.b) |
|
840 | 840 | nh.starta = self.starta |
|
841 | 841 | nh.startb = self.startb |
|
842 | 842 | nh.lena = self.lena |
|
843 | 843 | nh.lenb = self.lenb |
|
844 | 844 | return nh |
|
845 | 845 | |
|
846 | 846 | def read_unified_hunk(self, lr): |
|
847 | 847 | m = unidesc.match(self.desc) |
|
848 | 848 | if not m: |
|
849 | 849 | raise PatchError(_("bad hunk #%d") % self.number) |
|
850 | 850 | self.starta, self.lena, self.startb, self.lenb = m.groups() |
|
851 | 851 | if self.lena is None: |
|
852 | 852 | self.lena = 1 |
|
853 | 853 | else: |
|
854 | 854 | self.lena = int(self.lena) |
|
855 | 855 | if self.lenb is None: |
|
856 | 856 | self.lenb = 1 |
|
857 | 857 | else: |
|
858 | 858 | self.lenb = int(self.lenb) |
|
859 | 859 | self.starta = int(self.starta) |
|
860 | 860 | self.startb = int(self.startb) |
|
861 | 861 | diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, |
|
862 | 862 | self.b) |
|
863 | 863 | # if we hit EOF before finishing out the hunk, the last line will

864 | 864 | # be zero-length. Let's try to fix it up.
|
865 | 865 | while len(self.hunk[-1]) == 0: |
|
866 | 866 | del self.hunk[-1] |
|
867 | 867 | del self.a[-1] |
|
868 | 868 | del self.b[-1] |
|
869 | 869 | self.lena -= 1 |
|
870 | 870 | self.lenb -= 1 |
|
871 | 871 | self._fixnewline(lr) |
|
872 | 872 | |
|
873 | 873 | def read_context_hunk(self, lr): |
|
874 | 874 | self.desc = lr.readline() |
|
875 | 875 | m = contextdesc.match(self.desc) |
|
876 | 876 | if not m: |
|
877 | 877 | raise PatchError(_("bad hunk #%d") % self.number) |
|
878 | 878 | self.starta, aend = m.groups() |
|
879 | 879 | self.starta = int(self.starta) |
|
880 | 880 | if aend is None: |
|
881 | 881 | aend = self.starta |
|
882 | 882 | self.lena = int(aend) - self.starta |
|
883 | 883 | if self.starta: |
|
884 | 884 | self.lena += 1 |
|
885 | 885 | for x in xrange(self.lena): |
|
886 | 886 | l = lr.readline() |
|
887 | 887 | if l.startswith('---'): |
|
888 | 888 | # lines addition, old block is empty |
|
889 | 889 | lr.push(l) |
|
890 | 890 | break |
|
891 | 891 | s = l[2:] |
|
892 | 892 | if l.startswith('- ') or l.startswith('! '): |
|
893 | 893 | u = '-' + s |
|
894 | 894 | elif l.startswith(' '): |
|
895 | 895 | u = ' ' + s |
|
896 | 896 | else: |
|
897 | 897 | raise PatchError(_("bad hunk #%d old text line %d") % |
|
898 | 898 | (self.number, x)) |
|
899 | 899 | self.a.append(u) |
|
900 | 900 | self.hunk.append(u) |
|
901 | 901 | |
|
902 | 902 | l = lr.readline() |
|
903 | 903 | if l.startswith('\ '): |
|
904 | 904 | s = self.a[-1][:-1] |
|
905 | 905 | self.a[-1] = s |
|
906 | 906 | self.hunk[-1] = s |
|
907 | 907 | l = lr.readline() |
|
908 | 908 | m = contextdesc.match(l) |
|
909 | 909 | if not m: |
|
910 | 910 | raise PatchError(_("bad hunk #%d") % self.number) |
|
911 | 911 | self.startb, bend = m.groups() |
|
912 | 912 | self.startb = int(self.startb) |
|
913 | 913 | if bend is None: |
|
914 | 914 | bend = self.startb |
|
915 | 915 | self.lenb = int(bend) - self.startb |
|
916 | 916 | if self.startb: |
|
917 | 917 | self.lenb += 1 |
|
918 | 918 | hunki = 1 |
|
919 | 919 | for x in xrange(self.lenb): |
|
920 | 920 | l = lr.readline() |
|
921 | 921 | if l.startswith('\ '): |
|
922 | 922 | # XXX: the only way to hit this is with an invalid line range. |
|
923 | 923 | # The no-eol marker is not counted in the line range, but I |
|
924 | 924 | # guess there are diff(1) implementations which behave differently.
|
925 | 925 | s = self.b[-1][:-1] |
|
926 | 926 | self.b[-1] = s |
|
927 | 927 | self.hunk[hunki - 1] = s |
|
928 | 928 | continue |
|
929 | 929 | if not l: |
|
930 | 930 | # line deletions, new block is empty and we hit EOF |
|
931 | 931 | lr.push(l) |
|
932 | 932 | break |
|
933 | 933 | s = l[2:] |
|
934 | 934 | if l.startswith('+ ') or l.startswith('! '): |
|
935 | 935 | u = '+' + s |
|
936 | 936 | elif l.startswith(' '): |
|
937 | 937 | u = ' ' + s |
|
938 | 938 | elif len(self.b) == 0: |
|
939 | 939 | # line deletions, new block is empty |
|
940 | 940 | lr.push(l) |
|
941 | 941 | break |
|
942 | 942 | else: |
|
943 | 943 | raise PatchError(_("bad hunk #%d old text line %d") % |
|
944 | 944 | (self.number, x)) |
|
945 | 945 | self.b.append(s) |
|
946 | 946 | while True: |
|
947 | 947 | if hunki >= len(self.hunk): |
|
948 | 948 | h = "" |
|
949 | 949 | else: |
|
950 | 950 | h = self.hunk[hunki] |
|
951 | 951 | hunki += 1 |
|
952 | 952 | if h == u: |
|
953 | 953 | break |
|
954 | 954 | elif h.startswith('-'): |
|
955 | 955 | continue |
|
956 | 956 | else: |
|
957 | 957 | self.hunk.insert(hunki - 1, u) |
|
958 | 958 | break |
|
959 | 959 | |
|
960 | 960 | if not self.a: |
|
961 | 961 | # this happens when lines were only added to the hunk |
|
962 | 962 | for x in self.hunk: |
|
963 | 963 | if x.startswith('-') or x.startswith(' '): |
|
964 | 964 | self.a.append(x) |
|
965 | 965 | if not self.b: |
|
966 | 966 | # this happens when lines were only deleted from the hunk |
|
967 | 967 | for x in self.hunk: |
|
968 | 968 | if x.startswith('+') or x.startswith(' '): |
|
969 | 969 | self.b.append(x[1:]) |
|
970 | 970 | # @@ -start,len +start,len @@ |
|
971 | 971 | self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena, |
|
972 | 972 | self.startb, self.lenb) |
|
973 | 973 | self.hunk[0] = self.desc |
|
974 | 974 | self._fixnewline(lr) |
|
975 | 975 | |
|
976 | 976 | def _fixnewline(self, lr): |
|
977 | 977 | l = lr.readline() |
|
978 | 978 | if l.startswith('\ '): |
|
979 | 979 | diffhelpers.fix_newline(self.hunk, self.a, self.b) |
|
980 | 980 | else: |
|
981 | 981 | lr.push(l) |
|
982 | 982 | |
|
983 | 983 | def complete(self): |
|
984 | 984 | return len(self.a) == self.lena and len(self.b) == self.lenb |
|
985 | 985 | |
|
986 | 986 | def _fuzzit(self, old, new, fuzz, toponly): |
|
987 | 987 | # this removes context lines from the top and bottom of the 'old' and

988 | 988 | # 'new' line lists. It checks the hunk to make sure only context lines

989 | 989 | # are removed, and then returns the shortened lists of lines.
|
990 | 990 | fuzz = min(fuzz, len(old)) |
|
991 | 991 | if fuzz: |
|
992 | 992 | top = 0 |
|
993 | 993 | bot = 0 |
|
994 | 994 | hlen = len(self.hunk) |
|
995 | 995 | for x in xrange(hlen - 1): |
|
996 | 996 | # the hunk starts with the @@ line, so use x+1 |
|
997 | 997 | if self.hunk[x + 1][0] == ' ': |
|
998 | 998 | top += 1 |
|
999 | 999 | else: |
|
1000 | 1000 | break |
|
1001 | 1001 | if not toponly: |
|
1002 | 1002 | for x in xrange(hlen - 1): |
|
1003 | 1003 | if self.hunk[hlen - bot - 1][0] == ' ': |
|
1004 | 1004 | bot += 1 |
|
1005 | 1005 | else: |
|
1006 | 1006 | break |
|
1007 | 1007 | |
|
1008 | 1008 | bot = min(fuzz, bot) |
|
1009 | 1009 | top = min(fuzz, top) |
|
1010 | 1010 | return old[top:len(old)-bot], new[top:len(new)-bot], top |
|
1011 | 1011 | return old, new, 0 |
|
1012 | 1012 | |
|
1013 | 1013 | def fuzzit(self, fuzz, toponly): |
|
1014 | 1014 | old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly) |
|
1015 | 1015 | oldstart = self.starta + top |
|
1016 | 1016 | newstart = self.startb + top |
|
1017 | 1017 | # zero length hunk ranges already have their start decremented |
|
1018 | 1018 | if self.lena and oldstart > 0: |
|
1019 | 1019 | oldstart -= 1 |
|
1020 | 1020 | if self.lenb and newstart > 0: |
|
1021 | 1021 | newstart -= 1 |
|
1022 | 1022 | return old, oldstart, new, newstart |
|
1023 | 1023 | |
|
1024 | 1024 | class binhunk(object): |
|
1025 | 1025 | 'A binary patch file. Only understands literals so far.' |
|
1026 | 1026 | def __init__(self, lr, fname): |
|
1027 | 1027 | self.text = None |
|
1028 | 1028 | self.hunk = ['GIT binary patch\n'] |
|
1029 | 1029 | self._fname = fname |
|
1030 | 1030 | self._read(lr) |
|
1031 | 1031 | |
|
1032 | 1032 | def complete(self): |
|
1033 | 1033 | return self.text is not None |
|
1034 | 1034 | |
|
1035 | 1035 | def new(self): |
|
1036 | 1036 | return [self.text] |
|
1037 | 1037 | |
|
1038 | 1038 | def _read(self, lr): |
|
1039 | 1039 | def getline(lr, hunk): |
|
1040 | 1040 | l = lr.readline() |
|
1041 | 1041 | hunk.append(l) |
|
1042 | 1042 | return l.rstrip('\r\n') |
|
1043 | 1043 | |
|
1044 | 1044 | while True: |
|
1045 | 1045 | line = getline(lr, self.hunk) |
|
1046 | 1046 | if not line: |
|
1047 | 1047 | raise PatchError(_('could not extract "%s" binary data') |
|
1048 | 1048 | % self._fname) |
|
1049 | 1049 | if line.startswith('literal '): |
|
1050 | 1050 | break |
|
1051 | 1051 | size = int(line[8:].rstrip()) |
|
1052 | 1052 | dec = [] |
|
1053 | 1053 | line = getline(lr, self.hunk) |
|
1054 | 1054 | while len(line) > 1: |
|
1055 | 1055 | l = line[0] |
|
1056 | 1056 | if l <= 'Z' and l >= 'A': |
|
1057 | 1057 | l = ord(l) - ord('A') + 1 |
|
1058 | 1058 | else: |
|
1059 | 1059 | l = ord(l) - ord('a') + 27 |
|
1060 | 1060 | try: |
|
1061 | 1061 | dec.append(base85.b85decode(line[1:])[:l]) |
|
1062 | 1062 | except ValueError, e: |
|
1063 | 1063 | raise PatchError(_('could not decode "%s" binary patch: %s') |
|
1064 | 1064 | % (self._fname, str(e))) |
|
1065 | 1065 | line = getline(lr, self.hunk) |
|
1066 | 1066 | text = zlib.decompress(''.join(dec)) |
|
1067 | 1067 | if len(text) != size: |
|
1068 | 1068 | raise PatchError(_('"%s" length is %d bytes, should be %d') |
|
1069 | 1069 | % (self._fname, len(text), size)) |
|
1070 | 1070 | self.text = text |
|
1071 | 1071 | |
|
1072 | 1072 | def parsefilename(str): |
|
1073 | 1073 | # --- filename \t|space stuff |
|
1074 | 1074 | s = str[4:].rstrip('\r\n') |
|
1075 | 1075 | i = s.find('\t') |
|
1076 | 1076 | if i < 0: |
|
1077 | 1077 | i = s.find(' ') |
|
1078 | 1078 | if i < 0: |
|
1079 | 1079 | return s |
|
1080 | 1080 | return s[:i] |
|
1081 | 1081 | |
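Examples: the filename ends at the first tab (or, failing that, the first space) after the four-character '--- '/'+++ ' marker:

    assert parsefilename('--- a/foo.c\t2011-01-01 10:00:00') == 'a/foo.c'
    assert parsefilename('+++ b/foo.c') == 'b/foo.c'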
|
1082 | 1082 | def pathstrip(path, strip): |
|
1083 | 1083 | pathlen = len(path) |
|
1084 | 1084 | i = 0 |
|
1085 | 1085 | if strip == 0: |
|
1086 | 1086 | return '', path.rstrip() |
|
1087 | 1087 | count = strip |
|
1088 | 1088 | while count > 0: |
|
1089 | 1089 | i = path.find('/', i) |
|
1090 | 1090 | if i == -1: |
|
1091 | 1091 | raise PatchError(_("unable to strip away %d of %d dirs from %s") % |
|
1092 | 1092 | (count, strip, path)) |
|
1093 | 1093 | i += 1 |
|
1094 | 1094 | # consume '//' in the path |
|
1095 | 1095 | while i < pathlen - 1 and path[i] == '/': |
|
1096 | 1096 | i += 1 |
|
1097 | 1097 | count -= 1 |
|
1098 | 1098 | return path[:i].lstrip(), path[i:].rstrip() |
|
1099 | 1099 | |
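A few examples mirroring patch(1)'s -p handling; the return value is a (stripped-prefix, remainder) pair:

    assert pathstrip('a/b/c.txt', 0) == ('', 'a/b/c.txt')
    assert pathstrip('a/b/c.txt', 1) == ('a/', 'b/c.txt')
    assert pathstrip('a/b/c.txt', 2) == ('a/b/', 'c.txt')
    # asking for more components than the path has raises PatchError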
|
1100 | 1100 | def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip): |
|
1101 | 1101 | nulla = afile_orig == "/dev/null" |
|
1102 | 1102 | nullb = bfile_orig == "/dev/null" |
|
1103 | 1103 | create = nulla and hunk.starta == 0 and hunk.lena == 0 |
|
1104 | 1104 | remove = nullb and hunk.startb == 0 and hunk.lenb == 0 |
|
1105 | 1105 | abase, afile = pathstrip(afile_orig, strip) |
|
1106 | 1106 | gooda = not nulla and backend.exists(afile) |
|
1107 | 1107 | bbase, bfile = pathstrip(bfile_orig, strip) |
|
1108 | 1108 | if afile == bfile: |
|
1109 | 1109 | goodb = gooda |
|
1110 | 1110 | else: |
|
1111 | 1111 | goodb = not nullb and backend.exists(bfile) |
|
1112 | 1112 | missing = not goodb and not gooda and not create |
|
1113 | 1113 | |
|
1114 | 1114 | # some diff programs apparently produce patches where the afile is |
|
1115 | 1115 | # not /dev/null, but afile starts with bfile |
|
1116 | 1116 | abasedir = afile[:afile.rfind('/') + 1] |
|
1117 | 1117 | bbasedir = bfile[:bfile.rfind('/') + 1] |
|
1118 | 1118 | if (missing and abasedir == bbasedir and afile.startswith(bfile) |
|
1119 | 1119 | and hunk.starta == 0 and hunk.lena == 0): |
|
1120 | 1120 | create = True |
|
1121 | 1121 | missing = False |
|
1122 | 1122 | |
|
1123 | 1123 | # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the |
|
1124 | 1124 | # diff is between a file and its backup. In this case, the original |
|
1125 | 1125 | # file should be patched (see original mpatch code). |
|
1126 | 1126 | isbackup = (abase == bbase and bfile.startswith(afile)) |
|
1127 | 1127 | fname = None |
|
1128 | 1128 | if not missing: |
|
1129 | 1129 | if gooda and goodb: |
|
1130 | 1130 | fname = isbackup and afile or bfile |
|
1131 | 1131 | elif gooda: |
|
1132 | 1132 | fname = afile |
|
1133 | 1133 | |
|
1134 | 1134 | if not fname: |
|
1135 | 1135 | if not nullb: |
|
1136 | 1136 | fname = isbackup and afile or bfile |
|
1137 | 1137 | elif not nulla: |
|
1138 | 1138 | fname = afile |
|
1139 | 1139 | else: |
|
1140 | 1140 | raise PatchError(_("undefined source and destination files")) |
|
1141 | 1141 | |
|
1142 | 1142 | gp = patchmeta(fname) |
|
1143 | 1143 | if create: |
|
1144 | 1144 | gp.op = 'ADD' |
|
1145 | 1145 | elif remove: |
|
1146 | 1146 | gp.op = 'DELETE' |
|
1147 | 1147 | return gp |
|
1148 | 1148 | |
|
1149 | 1149 | def scangitpatch(lr, firstline): |
|
1150 | 1150 | """ |
|
1151 | 1151 | Git patches can emit: |
|
1152 | 1152 | - rename a to b |
|
1153 | 1153 | - change b |
|
1154 | 1154 | - copy a to c |
|
1155 | 1155 | - change c |
|
1156 | 1156 | |
|
1157 | 1157 | We cannot apply this sequence as-is: the rename source 'a' could

1158 | 1158 | not be found, since it would already have been renamed. And we cannot

1159 | 1159 | copy from 'b' instead, because 'b' would already have been changed. So
|
1160 | 1160 | we scan the git patch for copy and rename commands so we can |
|
1161 | 1161 | perform the copies ahead of time. |
|
1162 | 1162 | """ |
|
1163 | 1163 | pos = 0 |
|
1164 | 1164 | try: |
|
1165 | 1165 | pos = lr.fp.tell() |
|
1166 | 1166 | fp = lr.fp |
|
1167 | 1167 | except IOError: |
|
1168 | 1168 | fp = cStringIO.StringIO(lr.fp.read()) |
|
1169 | 1169 | gitlr = linereader(fp) |
|
1170 | 1170 | gitlr.push(firstline) |
|
1171 | 1171 | gitpatches = readgitpatch(gitlr) |
|
1172 | 1172 | fp.seek(pos) |
|
1173 | 1173 | return gitpatches |
|
1174 | 1174 | |
|
1175 | 1175 | def iterhunks(fp): |
|
1176 | 1176 | """Read a patch and yield the following events: |
|
1177 | 1177 | - ("file", afile, bfile, firsthunk): select a new target file. |
|
1178 | 1178 | - ("hunk", hunk): a new hunk is ready to be applied, follows a |
|
1179 | 1179 | "file" event. |
|
1180 | 1180 | - ("git", gitchanges): current diff is in git format, gitchanges |
|
1181 | 1181 | maps filenames to gitpatch records. Unique event. |
|
1182 | 1182 | """ |
|
1183 | 1183 | afile = "" |
|
1184 | 1184 | bfile = "" |
|
1185 | 1185 | state = None |
|
1186 | 1186 | hunknum = 0 |
|
1187 | 1187 | emitfile = newfile = False |
|
1188 | 1188 | gitpatches = None |
|
1189 | 1189 | |
|
1190 | 1190 | # our states |
|
1191 | 1191 | BFILE = 1 |
|
1192 | 1192 | context = None |
|
1193 | 1193 | lr = linereader(fp) |
|
1194 | 1194 | |
|
1195 | 1195 | while True: |
|
1196 | 1196 | x = lr.readline() |
|
1197 | 1197 | if not x: |
|
1198 | 1198 | break |
|
1199 | 1199 | if state == BFILE and ( |
|
1200 | 1200 | (not context and x[0] == '@') |
|
1201 | 1201 | or (context is not False and x.startswith('***************')) |
|
1202 | 1202 | or x.startswith('GIT binary patch')): |
|
1203 | 1203 | gp = None |
|
1204 | 1204 | if (gitpatches and |
|
1205 | 1205 | gitpatches[-1].ispatching(afile, bfile)): |
|
1206 | 1206 | gp = gitpatches.pop() |
|
1207 | 1207 | if x.startswith('GIT binary patch'): |
|
1208 | 1208 | h = binhunk(lr, gp.path) |
|
1209 | 1209 | else: |
|
1210 | 1210 | if context is None and x.startswith('***************'): |
|
1211 | 1211 | context = True |
|
1212 | 1212 | h = hunk(x, hunknum + 1, lr, context) |
|
1213 | 1213 | hunknum += 1 |
|
1214 | 1214 | if emitfile: |
|
1215 | 1215 | emitfile = False |
|
1216 | 1216 | yield 'file', (afile, bfile, h, gp and gp.copy() or None) |
|
1217 | 1217 | yield 'hunk', h |
|
1218 | 1218 | elif x.startswith('diff --git'): |
|
1219 | 1219 | m = gitre.match(x.rstrip(' \r\n')) |
|
1220 | 1220 | if not m: |
|
1221 | 1221 | continue |
|
1222 | 1222 | if gitpatches is None: |
|
1223 | 1223 | # scan whole input for git metadata |
|
1224 | 1224 | gitpatches = scangitpatch(lr, x) |
|
1225 | 1225 | yield 'git', [g.copy() for g in gitpatches |
|
1226 | 1226 | if g.op in ('COPY', 'RENAME')] |
|
1227 | 1227 | gitpatches.reverse() |
|
1228 | 1228 | afile = 'a/' + m.group(1) |
|
1229 | 1229 | bfile = 'b/' + m.group(2) |
|
1230 | 1230 | while gitpatches and not gitpatches[-1].ispatching(afile, bfile): |
|
1231 | 1231 | gp = gitpatches.pop() |
|
1232 | 1232 | yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) |
|
1233 | 1233 | if not gitpatches: |
|
1234 | 1234 | raise PatchError(_('failed to synchronize metadata for "%s"') |
|
1235 | 1235 | % afile[2:]) |
|
1236 | 1236 | gp = gitpatches[-1] |
|
1237 | 1237 | newfile = True |
|
1238 | 1238 | elif x.startswith('---'): |
|
1239 | 1239 | # check for a unified diff |
|
1240 | 1240 | l2 = lr.readline() |
|
1241 | 1241 | if not l2.startswith('+++'): |
|
1242 | 1242 | lr.push(l2) |
|
1243 | 1243 | continue |
|
1244 | 1244 | newfile = True |
|
1245 | 1245 | context = False |
|
1246 | 1246 | afile = parsefilename(x) |
|
1247 | 1247 | bfile = parsefilename(l2) |
|
1248 | 1248 | elif x.startswith('***'): |
|
1249 | 1249 | # check for a context diff |
|
1250 | 1250 | l2 = lr.readline() |
|
1251 | 1251 | if not l2.startswith('---'): |
|
1252 | 1252 | lr.push(l2) |
|
1253 | 1253 | continue |
|
1254 | 1254 | l3 = lr.readline() |
|
1255 | 1255 | lr.push(l3) |
|
1256 | 1256 | if not l3.startswith("***************"): |
|
1257 | 1257 | lr.push(l2) |
|
1258 | 1258 | continue |
|
1259 | 1259 | newfile = True |
|
1260 | 1260 | context = True |
|
1261 | 1261 | afile = parsefilename(x) |
|
1262 | 1262 | bfile = parsefilename(l2) |
|
1263 | 1263 | |
|
1264 | 1264 | if newfile: |
|
1265 | 1265 | newfile = False |
|
1266 | 1266 | emitfile = True |
|
1267 | 1267 | state = BFILE |
|
1268 | 1268 | hunknum = 0 |
|
1269 | 1269 | |
|
1270 | 1270 | while gitpatches: |
|
1271 | 1271 | gp = gitpatches.pop() |
|
1272 | 1272 | yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) |
|
1273 | 1273 | |
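A minimal sketch of the event stream for a one-hunk unified diff (assumes the mercurial modules imported by this file, e.g. diffhelpers, are available):

    import cStringIO
    fp = cStringIO.StringIO(
        '--- a/hello.txt\n'
        '+++ b/hello.txt\n'
        '@@ -1,1 +1,1 @@\n'
        '-hello\n'
        '+world\n')
    for state, values in iterhunks(fp):
        print state
    # prints 'file' (afile, bfile, first hunk, no git metadata), then 'hunk'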
|
1274 | 1274 | def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'): |
|
1275 | 1275 | """Reads a patch from fp and tries to apply it. |
|
1276 | 1276 | |
|
1277 | 1277 | Returns 0 for a clean patch, -1 if any rejects were found and 1 if |
|
1278 | 1278 | there was any fuzz. |
|
1279 | 1279 | |
|
1280 | 1280 | If 'eolmode' is 'strict', the patch content and patched file are |
|
1281 | 1281 | read in binary mode. Otherwise, line endings are ignored when |
|
1282 | 1282 | patching then normalized according to 'eolmode'. |
|
1283 | 1283 | """ |
|
1284 | 1284 | return _applydiff(ui, fp, patchfile, backend, store, strip=strip, |
|
1285 | 1285 | eolmode=eolmode) |
|
1286 | 1286 | |
|
1287 | 1287 | def _applydiff(ui, fp, patcher, backend, store, strip=1, |
|
1288 | 1288 | eolmode='strict'): |
|
1289 | 1289 | |
|
1290 | 1290 | def pstrip(p): |
|
1291 | 1291 | return pathstrip(p, strip - 1)[1] |
|
1292 | 1292 | |
|
1293 | 1293 | rejects = 0 |
|
1294 | 1294 | err = 0 |
|
1295 | 1295 | current_file = None |
|
1296 | 1296 | |
|
1297 | 1297 | for state, values in iterhunks(fp): |
|
1298 | 1298 | if state == 'hunk': |
|
1299 | 1299 | if not current_file: |
|
1300 | 1300 | continue |
|
1301 | 1301 | ret = current_file.apply(values) |
|
1302 | 1302 | if ret > 0: |
|
1303 | 1303 | err = 1 |
|
1304 | 1304 | elif state == 'file': |
|
1305 | 1305 | if current_file: |
|
1306 | 1306 | rejects += current_file.close() |
|
1307 | 1307 | current_file = None |
|
1308 | 1308 | afile, bfile, first_hunk, gp = values |
|
1309 | 1309 | if gp: |
|
1310 | 1310 | gp.path = pstrip(gp.path) |
|
1311 | 1311 | if gp.oldpath: |
|
1312 | 1312 | gp.oldpath = pstrip(gp.oldpath) |
|
1313 | 1313 | else: |
|
1314 | 1314 | gp = makepatchmeta(backend, afile, bfile, first_hunk, strip) |
|
1315 | 1315 | if gp.op == 'RENAME': |
|
1316 | 1316 | backend.unlink(gp.oldpath) |
|
1317 | 1317 | if not first_hunk: |
|
1318 | 1318 | if gp.op == 'DELETE': |
|
1319 | 1319 | backend.unlink(gp.path) |
|
1320 | 1320 | continue |
|
1321 | 1321 | data, mode = None, None |
|
1322 | 1322 | if gp.op in ('RENAME', 'COPY'): |
|
1323 | 1323 | data, mode = store.getfile(gp.oldpath)[:2] |
|
1324 | 1324 | if gp.mode: |
|
1325 | 1325 | mode = gp.mode |
|
1326 | 1326 | if gp.op == 'ADD': |
|
1327 | 1327 | # Added files without content have no hunk and |
|
1328 | 1328 | # must be created |
|
1329 | 1329 | data = '' |
|
1330 | 1330 | if data or mode: |
|
1331 | 1331 | if (gp.op in ('ADD', 'RENAME', 'COPY') |
|
1332 | 1332 | and backend.exists(gp.path)): |
|
1333 | 1333 | raise PatchError(_("cannot create %s: destination " |
|
1334 | 1334 | "already exists") % gp.path) |
|
1335 | 1335 | backend.setfile(gp.path, data, mode, gp.oldpath) |
|
1336 | 1336 | continue |
|
1337 | 1337 | try: |
|
1338 | 1338 | current_file = patcher(ui, gp, backend, store, |
|
1339 | 1339 | eolmode=eolmode) |
|
1340 | 1340 | except PatchError, inst: |
|
1341 | 1341 | ui.warn(str(inst) + '\n') |
|
1342 | 1342 | current_file = None |
|
1343 | 1343 | rejects += 1 |
|
1344 | 1344 | continue |
|
1345 | 1345 | elif state == 'git': |
|
1346 | 1346 | for gp in values: |
|
1347 | 1347 | path = pstrip(gp.oldpath) |
|
1348 | 1348 | data, mode = backend.getfile(path) |
|
1349 | 1349 | store.setfile(path, data, mode) |
|
1350 | 1350 | else: |
|
1351 | 1351 | raise util.Abort(_('unsupported parser state: %s') % state) |
|
1352 | 1352 | |
|
1353 | 1353 | if current_file: |
|
1354 | 1354 | rejects += current_file.close() |
|
1355 | 1355 | |
|
1356 | 1356 | if rejects: |
|
1357 | 1357 | return -1 |
|
1358 | 1358 | return err |
|
1359 | 1359 | |
|
1360 | 1360 | def _externalpatch(ui, repo, patcher, patchname, strip, files, |
|
1361 | 1361 | similarity): |
|
1362 | 1362 | """use <patcher> to apply <patchname> to the working directory. |
|
1363 | 1363 | returns whether patch was applied with fuzz factor.""" |
|
1364 | 1364 | |
|
1365 | 1365 | fuzz = False |
|
1366 | 1366 | args = [] |
|
1367 | 1367 | cwd = repo.root |
|
1368 | 1368 | if cwd: |
|
1369 | 1369 | args.append('-d %s' % util.shellquote(cwd)) |
|
1370 | 1370 | fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, |
|
1371 | 1371 | util.shellquote(patchname))) |
|
1372 | 1372 | try: |
|
1373 | 1373 | for line in fp: |
|
1374 | 1374 | line = line.rstrip() |
|
1375 | 1375 | ui.note(line + '\n') |
|
1376 | 1376 | if line.startswith('patching file '): |
|
1377 | 1377 | pf = util.parsepatchoutput(line) |
|
1378 | 1378 | printed_file = False |
|
1379 | 1379 | files.add(pf) |
|
1380 | 1380 | elif line.find('with fuzz') >= 0: |
|
1381 | 1381 | fuzz = True |
|
1382 | 1382 | if not printed_file: |
|
1383 | 1383 | ui.warn(pf + '\n') |
|
1384 | 1384 | printed_file = True |
|
1385 | 1385 | ui.warn(line + '\n') |
|
1386 | 1386 | elif line.find('saving rejects to file') >= 0: |
|
1387 | 1387 | ui.warn(line + '\n') |
|
1388 | 1388 | elif line.find('FAILED') >= 0: |
|
1389 | 1389 | if not printed_file: |
|
1390 | 1390 | ui.warn(pf + '\n') |
|
1391 | 1391 | printed_file = True |
|
1392 | 1392 | ui.warn(line + '\n') |
|
1393 | 1393 | finally: |
|
1394 | 1394 | if files: |
|
1395 | 1395 | cfiles = list(files) |
|
1396 | 1396 | cwd = repo.getcwd() |
|
1397 | 1397 | if cwd: |
|
1398 | 1398 | cfiles = [util.pathto(repo.root, cwd, f) |
|
1399 | 1399 | for f in cfiles] |
|
1400 | 1400 | scmutil.addremove(repo, cfiles, similarity=similarity) |
|
1401 | 1401 | code = fp.close() |
|
1402 | 1402 | if code: |
|
1403 | 1403 | raise PatchError(_("patch command failed: %s") % |
|
1404 | 1404 | util.explainexit(code)[0]) |
|
1405 | 1405 | return fuzz |
|
1406 | 1406 | |
|
1407 | 1407 | def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'): |
|
1408 | 1408 | if files is None: |
|
1409 | 1409 | files = set() |
|
1410 | 1410 | if eolmode is None: |
|
1411 | 1411 | eolmode = ui.config('patch', 'eol', 'strict') |
|
1412 | 1412 | if eolmode.lower() not in eolmodes: |
|
1413 | 1413 | raise util.Abort(_('unsupported line endings type: %s') % eolmode) |
|
1414 | 1414 | eolmode = eolmode.lower() |
|
1415 | 1415 | |
|
1416 | 1416 | store = filestore() |
|
1417 | 1417 | try: |
|
1418 | 1418 | fp = open(patchobj, 'rb') |
|
1419 | 1419 | except TypeError: |
|
1420 | 1420 | fp = patchobj |
|
1421 | 1421 | try: |
|
1422 | 1422 | ret = applydiff(ui, fp, backend, store, strip=strip, |
|
1423 | 1423 | eolmode=eolmode) |
|
1424 | 1424 | finally: |
|
1425 | 1425 | if fp != patchobj: |
|
1426 | 1426 | fp.close() |
|
1427 | 1427 | files.update(backend.close()) |
|
1428 | 1428 | store.close() |
|
1429 | 1429 | if ret < 0: |
|
1430 | 1430 | raise PatchError(_('patch failed to apply')) |
|
1431 | 1431 | return ret > 0 |
|
1432 | 1432 | |
|
1433 | 1433 | def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict', |
|
1434 | 1434 | similarity=0): |
|
1435 | 1435 | """use builtin patch to apply <patchobj> to the working directory. |
|
1436 | 1436 | returns whether patch was applied with fuzz factor.""" |
|
1437 | 1437 | backend = workingbackend(ui, repo, similarity) |
|
1438 | 1438 | return patchbackend(ui, backend, patchobj, strip, files, eolmode) |
|
1439 | 1439 | |
|
1440 | 1440 | def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None, |
|
1441 | 1441 | eolmode='strict'): |
|
1442 | 1442 | backend = repobackend(ui, repo, ctx, store) |
|
1443 | 1443 | return patchbackend(ui, backend, patchobj, strip, files, eolmode) |
|
1444 | 1444 | |
|
1445 | 1445 | def makememctx(repo, parents, text, user, date, branch, files, store, |
|
1446 | 1446 | editor=None): |
|
1447 | 1447 | def getfilectx(repo, memctx, path): |
|
1448 | 1448 | data, (islink, isexec), copied = store.getfile(path) |
|
1449 | 1449 | return context.memfilectx(path, data, islink=islink, isexec=isexec, |
|
1450 | 1450 | copied=copied) |
|
1451 | 1451 | extra = {} |
|
1452 | 1452 | if branch: |
|
1453 | 1453 | extra['branch'] = encoding.fromlocal(branch) |
|
1454 | 1454 | ctx = context.memctx(repo, parents, text, files, getfilectx, user, |
|
1455 | 1455 | date, extra) |
|
1456 | 1456 | if editor: |
|
1457 | 1457 | ctx._text = editor(repo, ctx, []) |
|
1458 | 1458 | return ctx |
|
1459 | 1459 | |
|
1460 | 1460 | def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict', |
|
1461 | 1461 | similarity=0): |
|
1462 | 1462 | """Apply <patchname> to the working directory. |
|
1463 | 1463 | |
|
1464 | 1464 | 'eolmode' specifies how end of lines should be handled. It can be: |
|
1465 | 1465 | - 'strict': inputs are read in binary mode, EOLs are preserved |
|
1466 | 1466 | - 'crlf': EOLs are ignored when patching and reset to CRLF |
|
1467 | 1467 | - 'lf': EOLs are ignored when patching and reset to LF |
|
1468 | 1468 | - None: get it from user settings, default to 'strict' |
|
1469 | 1469 | 'eolmode' is ignored when using an external patcher program. |
|
1470 | 1470 | |
|
1471 | 1471 | Returns whether patch was applied with fuzz factor. |
|
1472 | 1472 | """ |
|
1473 | 1473 | patcher = ui.config('ui', 'patch') |
|
1474 | 1474 | if files is None: |
|
1475 | 1475 | files = set() |
|
1476 | 1476 | try: |
|
1477 | 1477 | if patcher: |
|
1478 | 1478 | return _externalpatch(ui, repo, patcher, patchname, strip, |
|
1479 | 1479 | files, similarity) |
|
1480 | 1480 | return internalpatch(ui, repo, patchname, strip, files, eolmode, |
|
1481 | 1481 | similarity) |
|
1482 | 1482 | except PatchError, err: |
|
1483 | 1483 | raise util.Abort(str(err)) |
|
1484 | 1484 | |
|
1485 | 1485 | def changedfiles(ui, repo, patchpath, strip=1): |
|
1486 | 1486 | backend = fsbackend(ui, repo.root) |
|
1487 | 1487 | fp = open(patchpath, 'rb') |
|
1488 | 1488 | try: |
|
1489 | 1489 | changed = set() |
|
1490 | 1490 | for state, values in iterhunks(fp): |
|
1491 | 1491 | if state == 'file': |
|
1492 | 1492 | afile, bfile, first_hunk, gp = values |
|
1493 | 1493 | if gp: |
|
1494 | 1494 | gp.path = pathstrip(gp.path, strip - 1)[1] |
|
1495 | 1495 | if gp.oldpath: |
|
1496 | 1496 | gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1] |
|
1497 | 1497 | else: |
|
1498 | 1498 | gp = makepatchmeta(backend, afile, bfile, first_hunk, strip) |
|
1499 | 1499 | changed.add(gp.path) |
|
1500 | 1500 | if gp.op == 'RENAME': |
|
1501 | 1501 | changed.add(gp.oldpath) |
|
1502 | 1502 | elif state not in ('hunk', 'git'): |
|
1503 | 1503 | raise util.Abort(_('unsupported parser state: %s') % state) |
|
1504 | 1504 | return changed |
|
1505 | 1505 | finally: |
|
1506 | 1506 | fp.close() |
|
1507 | 1507 | |
|
1508 | 1508 | def b85diff(to, tn): |
|
1509 | 1509 | '''print base85-encoded binary diff''' |
|
1510 | 1510 | def gitindex(text): |
|
1511 | 1511 | if not text: |
|
1512 | 1512 | return hex(nullid) |
|
1513 | 1513 | l = len(text) |
|
1514 | 1514 | s = util.sha1('blob %d\0' % l) |
|
1515 | 1515 | s.update(text) |
|
1516 | 1516 | return s.hexdigest() |
|
1517 | 1517 | |
|
1518 | 1518 | def fmtline(line): |
|
1519 | 1519 | l = len(line) |
|
1520 | 1520 | if l <= 26: |
|
1521 | 1521 | l = chr(ord('A') + l - 1) |
|
1522 | 1522 | else: |
|
1523 | 1523 | l = chr(l - 26 + ord('a') - 1) |
|
1524 | 1524 | return '%c%s\n' % (l, base85.b85encode(line, True)) |
|
1525 | 1525 | |
|
1526 | 1526 | def chunk(text, csize=52): |
|
1527 | 1527 | l = len(text) |
|
1528 | 1528 | i = 0 |
|
1529 | 1529 | while i < l: |
|
1530 | 1530 | yield text[i:i + csize] |
|
1531 | 1531 | i += csize |
|
1532 | 1532 | |
|
1533 | 1533 | tohash = gitindex(to) |
|
1534 | 1534 | tnhash = gitindex(tn) |
|
1535 | 1535 | if tohash == tnhash: |
|
1536 | 1536 | return "" |
|
1537 | 1537 | |
|
1538 | 1538 | # TODO: deltas |
|
1539 | 1539 | ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' % |
|
1540 | 1540 | (tohash, tnhash, len(tn))] |
|
1541 | 1541 | for l in chunk(zlib.compress(tn)): |
|
1542 | 1542 | ret.append(fmtline(l)) |
|
1543 | 1543 | ret.append('\n') |
|
1544 | 1544 | return ''.join(ret) |
|
1545 | 1545 | |
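A sketch of the per-line length prefix written by fmtline above and decoded by binhunk._read: chunk lengths 1-26 map to 'A'-'Z' and 27-52 to 'a'-'z':

    def lenchar(n):
        # hypothetical helper mirroring fmtline's length encoding
        if n <= 26:
            return chr(ord('A') + n - 1)
        return chr(n - 26 + ord('a') - 1)

    assert lenchar(1) == 'A' and lenchar(26) == 'Z'
    assert lenchar(27) == 'a' and lenchar(52) == 'z'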
|
1546 | 1546 | class GitDiffRequired(Exception): |
|
1547 | 1547 | pass |
|
1548 | 1548 | |
|
1549 | 1549 | def diffopts(ui, opts=None, untrusted=False, section='diff'): |
|
1550 | 1550 | def get(key, name=None, getter=ui.configbool): |
|
1551 | 1551 | return ((opts and opts.get(key)) or |
|
1552 | 1552 | getter(section, name or key, None, untrusted=untrusted)) |
|
1553 | 1553 | return mdiff.diffopts( |
|
1554 | 1554 | text=opts and opts.get('text'), |
|
1555 | 1555 | git=get('git'), |
|
1556 | 1556 | nodates=get('nodates'), |
|
1557 | 1557 | showfunc=get('show_function', 'showfunc'), |
|
1558 | 1558 | ignorews=get('ignore_all_space', 'ignorews'), |
|
1559 | 1559 | ignorewsamount=get('ignore_space_change', 'ignorewsamount'), |
|
1560 | 1560 | ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'), |
|
1561 | 1561 | context=get('unified', getter=ui.config)) |
|
1562 | 1562 | |
|
1563 | 1563 | def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None, |
|
1564 | 1564 | losedatafn=None, prefix=''): |
|
1565 | 1565 | '''yields diff of changes to files between two nodes, or node and |
|
1566 | 1566 | working directory. |
|
1567 | 1567 | |
|
1568 | 1568 | if node1 is None, use first dirstate parent instead. |
|
1569 | 1569 | if node2 is None, compare node1 with working directory. |
|
1570 | 1570 | |
|
1571 | 1571 | losedatafn(**kwargs) is a callable run when opts.upgrade=True and
|
1572 | 1572 | every time some change cannot be represented with the current |
|
1573 | 1573 | patch format. Return False to upgrade to git patch format, True to |
|
1574 | 1574 | accept the loss or raise an exception to abort the diff. It is |
|
1575 | 1575 | called with the name of the current file being diffed as 'fn'. If set
|
1576 | 1576 | to None, patches will always be upgraded to git format when |
|
1577 | 1577 | necessary. |
|
1578 | 1578 | |
|
1579 | 1579 | prefix is a filename prefix that is prepended to all filenames on |
|
1580 | 1580 | display (used for subrepos). |
|
1581 | 1581 | ''' |
|
1582 | 1582 | |
|
1583 | 1583 | if opts is None: |
|
1584 | 1584 | opts = mdiff.defaultopts |
|
1585 | 1585 | |
|
1586 | 1586 | if not node1 and not node2: |
|
1587 | 1587 | node1 = repo.dirstate.p1() |
|
1588 | 1588 | |
|
1589 | 1589 | def lrugetfilectx(): |
|
1590 | 1590 | cache = {} |
|
1591 | 1591 | order = [] |
|
1592 | 1592 | def getfilectx(f, ctx): |
|
1593 | 1593 | fctx = ctx.filectx(f, filelog=cache.get(f)) |
|
1594 | 1594 | if f not in cache: |
|
1595 | 1595 | if len(cache) > 20: |
|
1596 | 1596 | del cache[order.pop(0)] |
|
1597 | 1597 | cache[f] = fctx.filelog() |
|
1598 | 1598 | else: |
|
1599 | 1599 | order.remove(f) |
|
1600 | 1600 | order.append(f) |
|
1601 | 1601 | return fctx |
|
1602 | 1602 | return getfilectx |
|
1603 | 1603 | getfilectx = lrugetfilectx() |
|
1604 | 1604 | |
|
1605 | 1605 | ctx1 = repo[node1] |
|
1606 | 1606 | ctx2 = repo[node2] |
|
1607 | 1607 | |
|
1608 | 1608 | if not changes: |
|
1609 | 1609 | changes = repo.status(ctx1, ctx2, match=match) |
|
1610 | 1610 | modified, added, removed = changes[:3] |
|
1611 | 1611 | |
|
1612 | 1612 | if not modified and not added and not removed: |
|
1613 | 1613 | return [] |
|
1614 | 1614 | |
|
1615 | 1615 | revs = None |
|
1616 | 1616 | if not repo.ui.quiet: |
|
1617 | 1617 | hexfunc = repo.ui.debugflag and hex or short |
|
1618 | 1618 | revs = [hexfunc(node) for node in [node1, node2] if node] |
|
1619 | 1619 | |
|
1620 | 1620 | copy = {} |
|
1621 | 1621 | if opts.git or opts.upgrade: |
|
1622 | 1622 | copy = copies.pathcopies(ctx1, ctx2) |
|
1623 | 1623 | |
|
1624 | 1624 | difffn = (lambda opts, losedata: |
|
1625 | 1625 | trydiff(repo, revs, ctx1, ctx2, modified, added, removed, |
|
1626 | 1626 | copy, getfilectx, opts, losedata, prefix)) |
|
1627 | 1627 | if opts.upgrade and not opts.git: |
|
1628 | 1628 | try: |
|
1629 | 1629 | def losedata(fn): |
|
1630 | 1630 | if not losedatafn or not losedatafn(fn=fn): |
|
1631 |      | raise GitDiffRequired

     | 1631 | raise GitDiffRequired
|
1632 | 1632 | # Buffer the whole output until we are sure it can be generated |
|
1633 | 1633 | return list(difffn(opts.copy(git=False), losedata)) |
|
1634 | 1634 | except GitDiffRequired: |
|
1635 | 1635 | return difffn(opts.copy(git=True), None) |
|
1636 | 1636 | else: |
|
1637 | 1637 | return difffn(opts, None) |
|
1638 | 1638 | |
|
1639 | 1639 | def difflabel(func, *args, **kw): |
|
1640 | 1640 | '''yields 2-tuples of (output, label) based on the output of func()''' |
|
1641 | 1641 | headprefixes = [('diff', 'diff.diffline'), |
|
1642 | 1642 | ('copy', 'diff.extended'), |
|
1643 | 1643 | ('rename', 'diff.extended'), |
|
1644 | 1644 | ('old', 'diff.extended'), |
|
1645 | 1645 | ('new', 'diff.extended'), |
|
1646 | 1646 | ('deleted', 'diff.extended'), |
|
1647 | 1647 | ('---', 'diff.file_a'), |
|
1648 | 1648 | ('+++', 'diff.file_b')] |
|
1649 | 1649 | textprefixes = [('@', 'diff.hunk'), |
|
1650 | 1650 | ('-', 'diff.deleted'), |
|
1651 | 1651 | ('+', 'diff.inserted')] |
|
1652 | 1652 | head = False |
|
1653 | 1653 | for chunk in func(*args, **kw): |
|
1654 | 1654 | lines = chunk.split('\n') |
|
1655 | 1655 | for i, line in enumerate(lines): |
|
1656 | 1656 | if i != 0: |
|
1657 | 1657 | yield ('\n', '') |
|
1658 | 1658 | if head: |
|
1659 | 1659 | if line.startswith('@'): |
|
1660 | 1660 | head = False |
|
1661 | 1661 | else: |
|
1662 | 1662 | if line and line[0] not in ' +-@\\': |
|
1663 | 1663 | head = True |
|
1664 | 1664 | stripline = line |
|
1665 | 1665 | if not head and line and line[0] in '+-': |
|
1666 | 1666 | # highlight trailing whitespace, but only in changed lines |
|
1667 | 1667 | stripline = line.rstrip() |
|
1668 | 1668 | prefixes = textprefixes |
|
1669 | 1669 | if head: |
|
1670 | 1670 | prefixes = headprefixes |
|
1671 | 1671 | for prefix, label in prefixes: |
|
1672 | 1672 | if stripline.startswith(prefix): |
|
1673 | 1673 | yield (stripline, label) |
|
1674 | 1674 | break |
|
1675 | 1675 | else: |
|
1676 | 1676 | yield (line, '') |
|
1677 | 1677 | if line != stripline: |
|
1678 | 1678 | yield (line[len(stripline):], 'diff.trailingwhitespace') |
|
1679 | 1679 | |
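A small sketch of difflabel wrapping a fake diff generator; the labels come from the prefix tables above:

    def fakediff():
        yield ('diff --git a/f b/f\n'
               '@@ -1 +1 @@\n'
               '-old\n'
               '+new \n')
    for output, label in difflabel(fakediff):
        if label:
            print '%r -> %s' % (output, label)
    # the '+new ' line yields 'diff.inserted' for '+new' and
    # 'diff.trailingwhitespace' for the trailing blank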
|
1680 | 1680 | def diffui(*args, **kw): |
|
1681 | 1681 | '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' |
|
1682 | 1682 | return difflabel(diff, *args, **kw) |
|
1683 | 1683 | |
|
1684 | 1684 | |
|
1685 | 1685 | def _addmodehdr(header, omode, nmode): |
|
1686 | 1686 | if omode != nmode: |
|
1687 | 1687 | header.append('old mode %s\n' % omode) |
|
1688 | 1688 | header.append('new mode %s\n' % nmode) |
|
1689 | 1689 | |
|
1690 | 1690 | def trydiff(repo, revs, ctx1, ctx2, modified, added, removed, |
|
1691 | 1691 | copy, getfilectx, opts, losedatafn, prefix): |
|
1692 | 1692 | |
|
1693 | 1693 | def join(f): |
|
1694 | 1694 | return os.path.join(prefix, f) |
|
1695 | 1695 | |
|
1696 | 1696 | date1 = util.datestr(ctx1.date()) |
|
1697 | 1697 | man1 = ctx1.manifest() |
|
1698 | 1698 | |
|
1699 | 1699 | gone = set() |
|
1700 | 1700 | gitmode = {'l': '120000', 'x': '100755', '': '100644'} |
|
1701 | 1701 | |
|
1702 | 1702 | copyto = dict([(v, k) for k, v in copy.items()]) |
|
1703 | 1703 | |
|
1704 | 1704 | if opts.git: |
|
1705 | 1705 | revs = None |
|
1706 | 1706 | |
|
1707 | 1707 | for f in sorted(modified + added + removed): |
|
1708 | 1708 | to = None |
|
1709 | 1709 | tn = None |
|
1710 | 1710 | dodiff = True |
|
1711 | 1711 | header = [] |
|
1712 | 1712 | if f in man1: |
|
1713 | 1713 | to = getfilectx(f, ctx1).data() |
|
1714 | 1714 | if f not in removed: |
|
1715 | 1715 | tn = getfilectx(f, ctx2).data() |
|
1716 | 1716 | a, b = f, f |
|
1717 | 1717 | if opts.git or losedatafn: |
|
1718 | 1718 | if f in added: |
|
1719 | 1719 | mode = gitmode[ctx2.flags(f)] |
|
1720 | 1720 | if f in copy or f in copyto: |
|
1721 | 1721 | if opts.git: |
|
1722 | 1722 | if f in copy: |
|
1723 | 1723 | a = copy[f] |
|
1724 | 1724 | else: |
|
1725 | 1725 | a = copyto[f] |
|
1726 | 1726 | omode = gitmode[man1.flags(a)] |
|
1727 | 1727 | _addmodehdr(header, omode, mode) |
|
1728 | 1728 | if a in removed and a not in gone: |
|
1729 | 1729 | op = 'rename' |
|
1730 | 1730 | gone.add(a) |
|
1731 | 1731 | else: |
|
1732 | 1732 | op = 'copy' |
|
1733 | 1733 | header.append('%s from %s\n' % (op, join(a))) |
|
1734 | 1734 | header.append('%s to %s\n' % (op, join(f))) |
|
1735 | 1735 | to = getfilectx(a, ctx1).data() |
|
1736 | 1736 | else: |
|
1737 | 1737 | losedatafn(f) |
|
1738 | 1738 | else: |
|
1739 | 1739 | if opts.git: |
|
1740 | 1740 | header.append('new file mode %s\n' % mode) |
|
1741 | 1741 | elif ctx2.flags(f): |
|
1742 | 1742 | losedatafn(f) |
|
1743 | 1743 | # In theory, if tn was copied or renamed we should check |
|
1744 | 1744 | # if the source is binary too but the copy record already |
|
1745 | 1745 | # forces git mode. |
|
1746 | 1746 | if util.binary(tn): |
|
1747 | 1747 | if opts.git: |
|
1748 | 1748 | dodiff = 'binary' |
|
1749 | 1749 | else: |
|
1750 | 1750 | losedatafn(f) |
|
1751 | 1751 | if not opts.git and not tn: |
|
1752 | 1752 | # regular diffs cannot represent a new empty file
|
1753 | 1753 | losedatafn(f) |
|
1754 | 1754 | elif f in removed: |
|
1755 | 1755 | if opts.git: |
|
1756 | 1756 | # have we already reported a copy above? |
|
1757 | 1757 | if ((f in copy and copy[f] in added |
|
1758 | 1758 | and copyto[copy[f]] == f) or |
|
1759 | 1759 | (f in copyto and copyto[f] in added |
|
1760 | 1760 | and copy[copyto[f]] == f)): |
|
1761 | 1761 | dodiff = False |
|
1762 | 1762 | else: |
|
1763 | 1763 | header.append('deleted file mode %s\n' % |
|
1764 | 1764 | gitmode[man1.flags(f)]) |
|
1765 | 1765 | elif not to or util.binary(to): |
|
1766 | 1766 | # regular diffs cannot represent empty file deletion |
|
1767 | 1767 | losedatafn(f) |
|
1768 | 1768 | else: |
|
1769 | 1769 | oflag = man1.flags(f) |
|
1770 | 1770 | nflag = ctx2.flags(f) |
|
1771 | 1771 | binary = util.binary(to) or util.binary(tn) |
|
1772 | 1772 | if opts.git: |
|
1773 | 1773 | _addmodehdr(header, gitmode[oflag], gitmode[nflag]) |
|
1774 | 1774 | if binary: |
|
1775 | 1775 | dodiff = 'binary' |
|
1776 | 1776 | elif binary or nflag != oflag: |
|
1777 | 1777 | losedatafn(f) |
|
1778 | 1778 | if opts.git: |
|
1779 | 1779 | header.insert(0, mdiff.diffline(revs, join(a), join(b), opts)) |
|
1780 | 1780 | |
|
1781 | 1781 | if dodiff: |
|
1782 | 1782 | if dodiff == 'binary': |
|
1783 | 1783 | text = b85diff(to, tn) |
|
1784 | 1784 | else: |
|
1785 | 1785 | text = mdiff.unidiff(to, date1, |
|
1786 | 1786 | # ctx2 date may be dynamic |
|
1787 | 1787 | tn, util.datestr(ctx2.date()), |
|
1788 | 1788 | join(a), join(b), revs, opts=opts) |
|
1789 | 1789 | if header and (text or len(header) > 1): |
|
1790 | 1790 | yield ''.join(header) |
|
1791 | 1791 | if text: |
|
1792 | 1792 | yield text |
|
1793 | 1793 | |
|
1794 | 1794 | def diffstatsum(stats): |
|
1795 | 1795 | maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False |
|
1796 | 1796 | for f, a, r, b in stats: |
|
1797 | 1797 | maxfile = max(maxfile, encoding.colwidth(f)) |
|
1798 | 1798 | maxtotal = max(maxtotal, a + r) |
|
1799 | 1799 | addtotal += a |
|
1800 | 1800 | removetotal += r |
|
1801 | 1801 | binary = binary or b |
|
1802 | 1802 | |
|
1803 | 1803 | return maxfile, maxtotal, addtotal, removetotal, binary |
|
1804 | 1804 | |
|
1805 | 1805 | def diffstatdata(lines): |
|
1806 | 1806 | diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$') |
|
1807 | 1807 | |
|
1808 | 1808 | results = [] |
|
1809 | 1809 | filename, adds, removes, isbinary = None, 0, 0, False |
|
1810 | 1810 | |
|
1811 | 1811 | def addresult(): |
|
1812 | 1812 | if filename: |
|
1813 | 1813 | results.append((filename, adds, removes, isbinary)) |
|
1814 | 1814 | |
|
1815 | 1815 | for line in lines: |
|
1816 | 1816 | if line.startswith('diff'): |
|
1817 | 1817 | addresult() |
|
1818 | 1818 | # set numbers to 0 anyway when starting new file |
|
1819 | 1819 | adds, removes, isbinary = 0, 0, False |
|
1820 | 1820 | if line.startswith('diff --git'): |
|
1821 | 1821 | filename = gitre.search(line).group(1) |
|
1822 | 1822 | elif line.startswith('diff -r'): |
|
1823 | 1823 | # format: "diff -r ... -r ... filename" |
|
1824 | 1824 | filename = diffre.search(line).group(1) |
|
1825 | 1825 | elif line.startswith('+') and not line.startswith('+++ '): |
|
1826 | 1826 | adds += 1 |
|
1827 | 1827 | elif line.startswith('-') and not line.startswith('--- '): |
|
1828 | 1828 | removes += 1 |
|
1829 | 1829 | elif (line.startswith('GIT binary patch') or |
|
1830 | 1830 | line.startswith('Binary file')): |
|
1831 | 1831 | isbinary = True |
|
1832 | 1832 | addresult() |
|
1833 | 1833 | return results |
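
A small worked example of the parser above on a plain (non-git) diff; note that the '---'/'+++' file headers are excluded from the add/remove counts:

lines = ['diff -r 000000000000 -r 111111111111 a.txt',
         '--- a/a.txt',
         '+++ b/a.txt',
         '@@ -1,2 +1,2 @@',
         '-old line',
         '+new line',
         '+another new line']
print diffstatdata(lines)
# [('a.txt', 2, 1, False)]  -- 2 additions, 1 removal, not binary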
|
1834 | 1834 | |
|
1835 | 1835 | def diffstat(lines, width=80, git=False): |
|
1836 | 1836 | output = [] |
|
1837 | 1837 | stats = diffstatdata(lines) |
|
1838 | 1838 | maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats) |
|
1839 | 1839 | |
|
1840 | 1840 | countwidth = len(str(maxtotal)) |
|
1841 | 1841 | if hasbinary and countwidth < 3: |
|
1842 | 1842 | countwidth = 3 |
|
1843 | 1843 | graphwidth = width - countwidth - maxname - 6 |
|
1844 | 1844 | if graphwidth < 10: |
|
1845 | 1845 | graphwidth = 10 |
|
1846 | 1846 | |
|
1847 | 1847 | def scale(i): |
|
1848 | 1848 | if maxtotal <= graphwidth: |
|
1849 | 1849 | return i |
|
1850 | 1850 | # If diffstat runs out of room it doesn't print anything, |
|
1851 | 1851 | # which isn't very useful, so always print at least one + or - |
|
1852 | 1852 | # if there were at least some changes. |
|
1853 | 1853 | return max(i * graphwidth // maxtotal, int(bool(i))) |
|
1854 | 1854 | |
|
1855 | 1855 | for filename, adds, removes, isbinary in stats: |
|
1856 | 1856 | if isbinary: |
|
1857 | 1857 | count = 'Bin' |
|
1858 | 1858 | else: |
|
1859 | 1859 | count = adds + removes |
|
1860 | 1860 | pluses = '+' * scale(adds) |
|
1861 | 1861 | minuses = '-' * scale(removes) |
|
1862 | 1862 | output.append(' %s%s | %*s %s%s\n' % |
|
1863 | 1863 | (filename, ' ' * (maxname - encoding.colwidth(filename)), |
|
1864 | 1864 | countwidth, count, pluses, minuses)) |
|
1865 | 1865 | |
|
1866 | 1866 | if stats: |
|
1867 | 1867 | output.append(_(' %d files changed, %d insertions(+), ' |
|
1868 | 1868 | '%d deletions(-)\n') |
|
1869 | 1869 | % (len(stats), totaladds, totalremoves)) |
|
1870 | 1870 | |
|
1871 | 1871 | return ''.join(output) |
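
To see why scale() takes the max with int(bool(i)), consider a hypothetical diff of 1000 total changes drawn in 60 graph columns:

# maxtotal = 1000, graphwidth = 60:
#   scale(500) == max(500 * 60 // 1000, 1) == 30   pluses/minuses
#   scale(5)   == max(5 * 60 // 1000, 1)   == 1    still visible
#   scale(0)   == max(0, 0)                == 0    nothing drawn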
|
1872 | 1872 | |
|
1873 | 1873 | def diffstatui(*args, **kw): |
|
1874 | 1874 | '''like diffstat(), but yields 2-tuples of (output, label) for |
|
1875 | 1875 | ui.write() |
|
1876 | 1876 | ''' |
|
1877 | 1877 | |
|
1878 | 1878 | for line in diffstat(*args, **kw).splitlines(): |
|
1879 | 1879 | if line and line[-1] in '+-': |
|
1880 | 1880 | name, graph = line.rsplit(' ', 1) |
|
1881 | 1881 | yield (name + ' ', '') |
|
1882 | 1882 | m = re.search(r'\++', graph) |
|
1883 | 1883 | if m: |
|
1884 | 1884 | yield (m.group(0), 'diffstat.inserted') |
|
1885 | 1885 | m = re.search(r'-+', graph) |
|
1886 | 1886 | if m: |
|
1887 | 1887 | yield (m.group(0), 'diffstat.deleted') |
|
1888 | 1888 | else: |
|
1889 | 1889 | yield (line, '') |
|
1890 | 1890 | yield ('\n', '') |
@@ -1,453 +1,453 @@
|
1 | 1 | # Copyright (C) 2004, 2005 Canonical Ltd |
|
2 | 2 | # |
|
3 | 3 | # This program is free software; you can redistribute it and/or modify |
|
4 | 4 | # it under the terms of the GNU General Public License as published by |
|
5 | 5 | # the Free Software Foundation; either version 2 of the License, or |
|
6 | 6 | # (at your option) any later version. |
|
7 | 7 | # |
|
8 | 8 | # This program is distributed in the hope that it will be useful, |
|
9 | 9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
10 | 10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
11 | 11 | # GNU General Public License for more details. |
|
12 | 12 | # |
|
13 | 13 | # You should have received a copy of the GNU General Public License |
|
14 | 14 | # along with this program; if not, see <http://www.gnu.org/licenses/>. |
|
15 | 15 | |
|
16 | 16 | # mbp: "you know that thing where cvs gives you conflict markers?" |
|
17 | 17 | # s: "i hate that." |
|
18 | 18 | |
|
19 | 19 | from i18n import _ |
|
20 | 20 | import scmutil, util, mdiff |
|
21 | 21 | import sys, os |
|
22 | 22 | |
|
23 | 23 | class CantReprocessAndShowBase(Exception): |
|
24 | 24 | pass |
|
25 | 25 | |
|
26 | 26 | def intersect(ra, rb): |
|
27 | 27 | """Given two ranges return the range where they intersect or None. |
|
28 | 28 | |
|
29 | 29 | >>> intersect((0, 10), (0, 6)) |
|
30 | 30 | (0, 6) |
|
31 | 31 | >>> intersect((0, 10), (5, 15)) |
|
32 | 32 | (5, 10) |
|
33 | 33 | >>> intersect((0, 10), (10, 15)) |
|
34 | 34 | >>> intersect((0, 9), (10, 15)) |
|
35 | 35 | >>> intersect((0, 9), (7, 15)) |
|
36 | 36 | (7, 9) |
|
37 | 37 | """ |
|
38 | 38 | assert ra[0] <= ra[1] |
|
39 | 39 | assert rb[0] <= rb[1] |
|
40 | 40 | |
|
41 | 41 | sa = max(ra[0], rb[0]) |
|
42 | 42 | sb = min(ra[1], rb[1]) |
|
43 | 43 | if sa < sb: |
|
44 | 44 | return sa, sb |
|
45 | 45 | else: |
|
46 | 46 | return None |
|
47 | 47 | |
|
48 | 48 | def compare_range(a, astart, aend, b, bstart, bend): |
|
49 | 49 | """Compare a[astart:aend] == b[bstart:bend], without slicing. |
|
50 | 50 | """ |
|
51 | 51 | if (aend - astart) != (bend - bstart): |
|
52 | 52 | return False |
|
53 | 53 | for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)): |
|
54 | 54 | if a[ia] != b[ib]: |
|
55 | 55 | return False |
|
56 | 56 | else: |
|
57 | 57 | return True |
|
58 | 58 | |
|
59 | 59 | class Merge3Text(object): |
|
60 | 60 | """3-way merge of texts. |
|
61 | 61 | |
|
62 | 62 | Given strings BASE, OTHER, THIS, tries to produce a combined text |
|
63 | 63 | incorporating the changes from both BASE->OTHER and BASE->THIS.""" |
|
64 | 64 | def __init__(self, basetext, atext, btext, base=None, a=None, b=None): |
|
65 | 65 | self.basetext = basetext |
|
66 | 66 | self.atext = atext |
|
67 | 67 | self.btext = btext |
|
68 | 68 | if base is None: |
|
69 | 69 | base = mdiff.splitnewlines(basetext) |
|
70 | 70 | if a is None: |
|
71 | 71 | a = mdiff.splitnewlines(atext) |
|
72 | 72 | if b is None: |
|
73 | 73 | b = mdiff.splitnewlines(btext) |
|
74 | 74 | self.base = base |
|
75 | 75 | self.a = a |
|
76 | 76 | self.b = b |
|
77 | 77 | |
|
78 | 78 | def merge_lines(self, |
|
79 | 79 | name_a=None, |
|
80 | 80 | name_b=None, |
|
81 | 81 | name_base=None, |
|
82 | 82 | start_marker='<<<<<<<', |
|
83 | 83 | mid_marker='=======', |
|
84 | 84 | end_marker='>>>>>>>', |
|
85 | 85 | base_marker=None, |
|
86 | 86 | reprocess=False): |
|
87 | 87 | """Return merge in cvs-like form. |
|
88 | 88 | """ |
|
89 | 89 | self.conflicts = False |
|
90 | 90 | newline = '\n' |
|
91 | 91 | if len(self.a) > 0: |
|
92 | 92 | if self.a[0].endswith('\r\n'): |
|
93 | 93 | newline = '\r\n' |
|
94 | 94 | elif self.a[0].endswith('\r'): |
|
95 | 95 | newline = '\r' |
|
96 | 96 | if base_marker and reprocess: |
|
97 | 97 | raise CantReprocessAndShowBase
|
98 | 98 | if name_a: |
|
99 | 99 | start_marker = start_marker + ' ' + name_a |
|
100 | 100 | if name_b: |
|
101 | 101 | end_marker = end_marker + ' ' + name_b |
|
102 | 102 | if name_base and base_marker: |
|
103 | 103 | base_marker = base_marker + ' ' + name_base |
|
104 | 104 | merge_regions = self.merge_regions() |
|
105 | 105 | if reprocess is True: |
|
106 | 106 | merge_regions = self.reprocess_merge_regions(merge_regions) |
|
107 | 107 | for t in merge_regions: |
|
108 | 108 | what = t[0] |
|
109 | 109 | if what == 'unchanged': |
|
110 | 110 | for i in range(t[1], t[2]): |
|
111 | 111 | yield self.base[i] |
|
112 | 112 | elif what == 'a' or what == 'same': |
|
113 | 113 | for i in range(t[1], t[2]): |
|
114 | 114 | yield self.a[i] |
|
115 | 115 | elif what == 'b': |
|
116 | 116 | for i in range(t[1], t[2]): |
|
117 | 117 | yield self.b[i] |
|
118 | 118 | elif what == 'conflict': |
|
119 | 119 | self.conflicts = True |
|
120 | 120 | yield start_marker + newline |
|
121 | 121 | for i in range(t[3], t[4]): |
|
122 | 122 | yield self.a[i] |
|
123 | 123 | if base_marker is not None: |
|
124 | 124 | yield base_marker + newline |
|
125 | 125 | for i in range(t[1], t[2]): |
|
126 | 126 | yield self.base[i] |
|
127 | 127 | yield mid_marker + newline |
|
128 | 128 | for i in range(t[5], t[6]): |
|
129 | 129 | yield self.b[i] |
|
130 | 130 | yield end_marker + newline |
|
131 | 131 | else: |
|
132 | 132 | raise ValueError(what) |
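
A minimal sketch of merge_lines on a one-line conflict, where both sides edit the same base line:

base  = 'a\nb\nc\n'
local = 'a\nx\nc\n'   # changed the middle line
other = 'a\ny\nc\n'   # changed it differently
m3 = Merge3Text(base, local, other)
merged = ''.join(m3.merge_lines(name_a='local', name_b='other'))
print merged
# a
# <<<<<<< local
# x
# =======
# y
# >>>>>>> other
# c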
|
133 | 133 | |
|
134 | 134 | def merge_annotated(self): |
|
135 | 135 | """Return merge with conflicts, showing origin of lines. |
|
136 | 136 | |
|
137 | 137 | Most useful for debugging merge. |
|
138 | 138 | """ |
|
139 | 139 | for t in self.merge_regions(): |
|
140 | 140 | what = t[0] |
|
141 | 141 | if what == 'unchanged': |
|
142 | 142 | for i in range(t[1], t[2]): |
|
143 | 143 | yield 'u | ' + self.base[i] |
|
144 | 144 | elif what == 'a' or what == 'same': |
|
145 | 145 | for i in range(t[1], t[2]): |
|
146 | 146 | yield what[0] + ' | ' + self.a[i] |
|
147 | 147 | elif what == 'b': |
|
148 | 148 | for i in range(t[1], t[2]): |
|
149 | 149 | yield 'b | ' + self.b[i] |
|
150 | 150 | elif what == 'conflict': |
|
151 | 151 | yield '<<<<\n' |
|
152 | 152 | for i in range(t[3], t[4]): |
|
153 | 153 | yield 'A | ' + self.a[i] |
|
154 | 154 | yield '----\n' |
|
155 | 155 | for i in range(t[5], t[6]): |
|
156 | 156 | yield 'B | ' + self.b[i] |
|
157 | 157 | yield '>>>>\n' |
|
158 | 158 | else: |
|
159 | 159 | raise ValueError(what) |
|
160 | 160 | |
|
161 | 161 | def merge_groups(self): |
|
162 | 162 | """Yield sequence of line groups. Each one is a tuple: |
|
163 | 163 | |
|
164 | 164 | 'unchanged', lines |
|
165 | 165 | Lines unchanged from base |
|
166 | 166 | |
|
167 | 167 | 'a', lines |
|
168 | 168 | Lines taken from a |
|
169 | 169 | |
|
170 | 170 | 'same', lines |
|
171 | 171 | Lines taken from a (and equal to b) |
|
172 | 172 | |
|
173 | 173 | 'b', lines |
|
174 | 174 | Lines taken from b |
|
175 | 175 | |
|
176 | 176 | 'conflict', base_lines, a_lines, b_lines |
|
177 | 177 | Lines from base were changed to either a or b and conflict. |
|
178 | 178 | """ |
|
179 | 179 | for t in self.merge_regions(): |
|
180 | 180 | what = t[0] |
|
181 | 181 | if what == 'unchanged': |
|
182 | 182 | yield what, self.base[t[1]:t[2]] |
|
183 | 183 | elif what == 'a' or what == 'same': |
|
184 | 184 | yield what, self.a[t[1]:t[2]] |
|
185 | 185 | elif what == 'b': |
|
186 | 186 | yield what, self.b[t[1]:t[2]] |
|
187 | 187 | elif what == 'conflict': |
|
188 | 188 | yield (what, |
|
189 | 189 | self.base[t[1]:t[2]], |
|
190 | 190 | self.a[t[3]:t[4]], |
|
191 | 191 | self.b[t[5]:t[6]]) |
|
192 | 192 | else: |
|
193 | 193 | raise ValueError(what) |
|
194 | 194 | |
|
195 | 195 | def merge_regions(self): |
|
196 | 196 | """Return sequences of matching and conflicting regions. |
|
197 | 197 | |
|
198 | 198 | This returns tuples, where the first value says what kind we |
|
199 | 199 | have: |
|
200 | 200 | |
|
201 | 201 | 'unchanged', start, end |
|
202 | 202 | Take a region of base[start:end] |
|
203 | 203 | |
|
204 | 204 | 'same', astart, aend |
|
205 | 205 | b and a are different from base but give the same result |
|
206 | 206 | |
|
207 | 207 | 'a', start, end |
|
208 | 208 | Non-clashing insertion from a[start:end] |
|
209 | 209 | |
|
210 | 210 | Method is as follows: |
|
211 | 211 | |
|
212 | 212 | The two sequences align only on regions which match the base |
|
213 | 213 | and both descendants. These are found by doing a two-way diff |
|
214 | 214 | of each one against the base, and then finding the |
|
215 | 215 | intersections between those regions. These "sync regions" |
|
216 | 216 | are by definition unchanged in both and easily dealt with. |
|
217 | 217 | |
|
218 | 218 | The regions in between can be in any of three cases:

219 | 219 | changed only in a, changed only in b, or conflicting.
|
220 | 220 | """ |
|
221 | 221 | |
|
222 | 222 | # section a[0:ia] has been disposed of, etc |
|
223 | 223 | iz = ia = ib = 0 |
|
224 | 224 | |
|
225 | 225 | for region in self.find_sync_regions(): |
|
226 | 226 | zmatch, zend, amatch, aend, bmatch, bend = region |
|
227 | 227 | #print 'match base [%d:%d]' % (zmatch, zend) |
|
228 | 228 | |
|
229 | 229 | matchlen = zend - zmatch |
|
230 | 230 | assert matchlen >= 0 |
|
231 | 231 | assert matchlen == (aend - amatch) |
|
232 | 232 | assert matchlen == (bend - bmatch) |
|
233 | 233 | |
|
234 | 234 | len_a = amatch - ia |
|
235 | 235 | len_b = bmatch - ib |
|
236 | 236 | len_base = zmatch - iz |
|
237 | 237 | assert len_a >= 0 |
|
238 | 238 | assert len_b >= 0 |
|
239 | 239 | assert len_base >= 0 |
|
240 | 240 | |
|
241 | 241 | #print 'unmatched a=%d, b=%d' % (len_a, len_b) |
|
242 | 242 | |
|
243 | 243 | if len_a or len_b: |
|
244 | 244 | # try to avoid actually slicing the lists |
|
245 | 245 | equal_a = compare_range(self.a, ia, amatch, |
|
246 | 246 | self.base, iz, zmatch) |
|
247 | 247 | equal_b = compare_range(self.b, ib, bmatch, |
|
248 | 248 | self.base, iz, zmatch) |
|
249 | 249 | same = compare_range(self.a, ia, amatch, |
|
250 | 250 | self.b, ib, bmatch) |
|
251 | 251 | |
|
252 | 252 | if same: |
|
253 | 253 | yield 'same', ia, amatch |
|
254 | 254 | elif equal_a and not equal_b: |
|
255 | 255 | yield 'b', ib, bmatch |
|
256 | 256 | elif equal_b and not equal_a: |
|
257 | 257 | yield 'a', ia, amatch |
|
258 | 258 | elif not equal_a and not equal_b: |
|
259 | 259 | yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch |
|
260 | 260 | else: |
|
261 | 261 | raise AssertionError("can't handle a=b=base but unmatched") |
|
262 | 262 | |
|
263 | 263 | ia = amatch |
|
264 | 264 | ib = bmatch |
|
265 | 265 | iz = zmatch |
|
266 | 266 | |
|
267 | 267 | # if the same part of the base was deleted on both sides |
|
268 | 268 | # that's OK, we can just skip it. |
|
269 | 269 | |
|
270 | 270 | |
|
271 | 271 | if matchlen > 0: |
|
272 | 272 | assert ia == amatch |
|
273 | 273 | assert ib == bmatch |
|
274 | 274 | assert iz == zmatch |
|
275 | 275 | |
|
276 | 276 | yield 'unchanged', zmatch, zend |
|
277 | 277 | iz = zend |
|
278 | 278 | ia = aend |
|
279 | 279 | ib = bend |
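
As a sketch of the tuple stream described in the docstring, and assuming mdiff.get_matching_blocks reports line-based matching blocks (as find_sync_regions below relies on), two non-overlapping edits should produce a conflict-free sequence like this:

m3 = Merge3Text('a\nb\nc\n',   # base
                'A\nb\nc\n',   # a edited the first line
                'a\nb\nC\n')   # b edited the last line
print list(m3.merge_regions())
# [('a', 0, 1), ('unchanged', 1, 2), ('b', 2, 3)]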
|
280 | 280 | |
|
281 | 281 | def reprocess_merge_regions(self, merge_regions): |
|
282 | 282 | """Where there are conflict regions, remove the agreed lines. |
|
283 | 283 | |
|
284 | 284 | Lines where both A and B have made the same changes are |
|
285 | 285 | eliminated. |
|
286 | 286 | """ |
|
287 | 287 | for region in merge_regions: |
|
288 | 288 | if region[0] != "conflict": |
|
289 | 289 | yield region |
|
290 | 290 | continue |
|
291 | 291 | type, iz, zmatch, ia, amatch, ib, bmatch = region |
|
292 | 292 | a_region = self.a[ia:amatch] |
|
293 | 293 | b_region = self.b[ib:bmatch] |
|
294 | 294 | matches = mdiff.get_matching_blocks(''.join(a_region), |
|
295 | 295 | ''.join(b_region)) |
|
296 | 296 | next_a = ia |
|
297 | 297 | next_b = ib |
|
298 | 298 | for region_ia, region_ib, region_len in matches[:-1]: |
|
299 | 299 | region_ia += ia |
|
300 | 300 | region_ib += ib |
|
301 | 301 | reg = self.mismatch_region(next_a, region_ia, next_b, |
|
302 | 302 | region_ib) |
|
303 | 303 | if reg is not None: |
|
304 | 304 | yield reg |
|
305 | 305 | yield 'same', region_ia, region_len + region_ia |
|
306 | 306 | next_a = region_ia + region_len |
|
307 | 307 | next_b = region_ib + region_len |
|
308 | 308 | reg = self.mismatch_region(next_a, amatch, next_b, bmatch) |
|
309 | 309 | if reg is not None: |
|
310 | 310 | yield reg |
|
311 | 311 | |
|
312 | 312 | def mismatch_region(next_a, region_ia, next_b, region_ib): |
|
313 | 313 | if next_a < region_ia or next_b < region_ib: |
|
314 | 314 | return 'conflict', None, None, next_a, region_ia, next_b, region_ib |
|
315 | 315 | mismatch_region = staticmethod(mismatch_region) |
|
316 | 316 | |
|
317 | 317 | def find_sync_regions(self): |
|
318 | 318 | """Return a list of sync regions, where both descendants match the base. |
|
319 | 319 | |
|
320 | 320 | Generates a list of (base1, base2, a1, a2, b1, b2). There is |
|
321 | 321 | always a zero-length sync region at the end of all the files. |
|
322 | 322 | """ |
|
323 | 323 | |
|
324 | 324 | ia = ib = 0 |
|
325 | 325 | amatches = mdiff.get_matching_blocks(self.basetext, self.atext) |
|
326 | 326 | bmatches = mdiff.get_matching_blocks(self.basetext, self.btext) |
|
327 | 327 | len_a = len(amatches) |
|
328 | 328 | len_b = len(bmatches) |
|
329 | 329 | |
|
330 | 330 | sl = [] |
|
331 | 331 | |
|
332 | 332 | while ia < len_a and ib < len_b: |
|
333 | 333 | abase, amatch, alen = amatches[ia] |
|
334 | 334 | bbase, bmatch, blen = bmatches[ib] |
|
335 | 335 | |
|
336 | 336 | # there is an unconflicted block at i; how long does it |
|
337 | 337 | # extend? until whichever one ends earlier. |
|
338 | 338 | i = intersect((abase, abase + alen), (bbase, bbase + blen)) |
|
339 | 339 | if i: |
|
340 | 340 | intbase = i[0] |
|
341 | 341 | intend = i[1] |
|
342 | 342 | intlen = intend - intbase |
|
343 | 343 | |
|
344 | 344 | # found a match of base[i[0], i[1]]; this may be less than |
|
345 | 345 | # the region that matches in either one |
|
346 | 346 | assert intlen <= alen |
|
347 | 347 | assert intlen <= blen |
|
348 | 348 | assert abase <= intbase |
|
349 | 349 | assert bbase <= intbase |
|
350 | 350 | |
|
351 | 351 | asub = amatch + (intbase - abase) |
|
352 | 352 | bsub = bmatch + (intbase - bbase) |
|
353 | 353 | aend = asub + intlen |
|
354 | 354 | bend = bsub + intlen |
|
355 | 355 | |
|
356 | 356 | assert self.base[intbase:intend] == self.a[asub:aend], \ |
|
357 | 357 | (self.base[intbase:intend], self.a[asub:aend]) |
|
358 | 358 | |
|
359 | 359 | assert self.base[intbase:intend] == self.b[bsub:bend] |
|
360 | 360 | |
|
361 | 361 | sl.append((intbase, intend, |
|
362 | 362 | asub, aend, |
|
363 | 363 | bsub, bend)) |
|
364 | 364 | |
|
365 | 365 | # advance whichever one ends first in the base text |
|
366 | 366 | if (abase + alen) < (bbase + blen): |
|
367 | 367 | ia += 1 |
|
368 | 368 | else: |
|
369 | 369 | ib += 1 |
|
370 | 370 | |
|
371 | 371 | intbase = len(self.base) |
|
372 | 372 | abase = len(self.a) |
|
373 | 373 | bbase = len(self.b) |
|
374 | 374 | sl.append((intbase, intbase, abase, abase, bbase, bbase)) |
|
375 | 375 | |
|
376 | 376 | return sl |
|
377 | 377 | |
|
378 | 378 | def find_unconflicted(self): |
|
379 | 379 | """Return a list of ranges in base that are not conflicted.""" |
|
380 | 380 | am = mdiff.get_matching_blocks(self.basetext, self.atext) |
|
381 | 381 | bm = mdiff.get_matching_blocks(self.basetext, self.btext) |
|
382 | 382 | |
|
383 | 383 | unc = [] |
|
384 | 384 | |
|
385 | 385 | while am and bm: |
|
386 | 386 | # there is an unconflicted block at i; how long does it |
|
387 | 387 | # extend? until whichever one ends earlier. |
|
388 | 388 | a1 = am[0][0] |
|
389 | 389 | a2 = a1 + am[0][2] |
|
390 | 390 | b1 = bm[0][0] |
|
391 | 391 | b2 = b1 + bm[0][2] |
|
392 | 392 | i = intersect((a1, a2), (b1, b2)) |
|
393 | 393 | if i: |
|
394 | 394 | unc.append(i) |
|
395 | 395 | |
|
396 | 396 | if a2 < b2: |
|
397 | 397 | del am[0] |
|
398 | 398 | else: |
|
399 | 399 | del bm[0] |
|
400 | 400 | |
|
401 | 401 | return unc |
|
402 | 402 | |
|
403 | 403 | def simplemerge(ui, local, base, other, **opts): |
|
404 | 404 | def readfile(filename): |
|
405 | 405 | f = open(filename, "rb") |
|
406 | 406 | text = f.read() |
|
407 | 407 | f.close() |
|
408 | 408 | if util.binary(text): |
|
409 | 409 | msg = _("%s looks like a binary file.") % filename |
|
410 | 410 | if not opts.get('quiet'): |
|
411 | 411 | ui.warn(_('warning: %s\n') % msg) |
|
412 | 412 | if not opts.get('text'): |
|
413 | 413 | raise util.Abort(msg) |
|
414 | 414 | return text |
|
415 | 415 | |
|
416 | 416 | name_a = local |
|
417 | 417 | name_b = other |
|
418 | 418 | labels = opts.get('label', []) |
|
419 | 419 | if labels: |
|
420 | 420 | name_a = labels.pop(0) |
|
421 | 421 | if labels: |
|
422 | 422 | name_b = labels.pop(0) |
|
423 | 423 | if labels: |
|
424 | 424 | raise util.Abort(_("can only specify two labels.")) |
|
425 | 425 | |
|
426 | 426 | try: |
|
427 | 427 | localtext = readfile(local) |
|
428 | 428 | basetext = readfile(base) |
|
429 | 429 | othertext = readfile(other) |
|
430 | 430 | except util.Abort: |
|
431 | 431 | return 1 |
|
432 | 432 | |
|
433 | 433 | local = os.path.realpath(local) |
|
434 | 434 | if not opts.get('print'): |
|
435 | 435 | opener = scmutil.opener(os.path.dirname(local)) |
|
436 | 436 | out = opener(os.path.basename(local), "w", atomictemp=True) |
|
437 | 437 | else: |
|
438 | 438 | out = sys.stdout |
|
439 | 439 | |
|
440 | 440 | reprocess = not opts.get('no_minimal') |
|
441 | 441 | |
|
442 | 442 | m3 = Merge3Text(basetext, localtext, othertext) |
|
443 | 443 | for line in m3.merge_lines(name_a=name_a, name_b=name_b, |
|
444 | 444 | reprocess=reprocess): |
|
445 | 445 | out.write(line) |
|
446 | 446 | |
|
447 | 447 | if not opts.get('print'): |
|
448 | 448 | out.close() |
|
449 | 449 | |
|
450 | 450 | if m3.conflicts: |
|
451 | 451 | if not opts.get('quiet'): |
|
452 | 452 | ui.warn(_("warning: conflicts during merge.\n")) |
|
453 | 453 | return 1 |
@@ -1,452 +1,452 @@
|
1 | 1 | # win32.py - utility functions that use win32 API |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import encoding |
|
9 | 9 | import ctypes, errno, os, struct, subprocess, random |
|
10 | 10 | |
|
11 | 11 | _kernel32 = ctypes.windll.kernel32 |
|
12 | 12 | _advapi32 = ctypes.windll.advapi32 |
|
13 | 13 | _user32 = ctypes.windll.user32 |
|
14 | 14 | |
|
15 | 15 | _BOOL = ctypes.c_long |
|
16 | 16 | _WORD = ctypes.c_ushort |
|
17 | 17 | _DWORD = ctypes.c_ulong |
|
18 | 18 | _UINT = ctypes.c_uint |
|
19 | 19 | _LONG = ctypes.c_long |
|
20 | 20 | _LPCSTR = _LPSTR = ctypes.c_char_p |
|
21 | 21 | _HANDLE = ctypes.c_void_p |
|
22 | 22 | _HWND = _HANDLE |
|
23 | 23 | |
|
24 | 24 | _INVALID_HANDLE_VALUE = _HANDLE(-1).value |
|
25 | 25 | |
|
26 | 26 | # GetLastError |
|
27 | 27 | _ERROR_SUCCESS = 0 |
|
28 | 28 | _ERROR_INVALID_PARAMETER = 87 |
|
29 | 29 | _ERROR_INSUFFICIENT_BUFFER = 122 |
|
30 | 30 | |
|
31 | 31 | # WPARAM is defined as UINT_PTR (unsigned type) |
|
32 | 32 | # LPARAM is defined as LONG_PTR (signed type) |
|
33 | 33 | if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p): |
|
34 | 34 | _WPARAM = ctypes.c_ulong |
|
35 | 35 | _LPARAM = ctypes.c_long |
|
36 | 36 | elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p): |
|
37 | 37 | _WPARAM = ctypes.c_ulonglong |
|
38 | 38 | _LPARAM = ctypes.c_longlong |
|
39 | 39 | |
|
40 | 40 | class _FILETIME(ctypes.Structure): |
|
41 | 41 | _fields_ = [('dwLowDateTime', _DWORD), |
|
42 | 42 | ('dwHighDateTime', _DWORD)] |
|
43 | 43 | |
|
44 | 44 | class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure): |
|
45 | 45 | _fields_ = [('dwFileAttributes', _DWORD), |
|
46 | 46 | ('ftCreationTime', _FILETIME), |
|
47 | 47 | ('ftLastAccessTime', _FILETIME), |
|
48 | 48 | ('ftLastWriteTime', _FILETIME), |
|
49 | 49 | ('dwVolumeSerialNumber', _DWORD), |
|
50 | 50 | ('nFileSizeHigh', _DWORD), |
|
51 | 51 | ('nFileSizeLow', _DWORD), |
|
52 | 52 | ('nNumberOfLinks', _DWORD), |
|
53 | 53 | ('nFileIndexHigh', _DWORD), |
|
54 | 54 | ('nFileIndexLow', _DWORD)] |
|
55 | 55 | |
|
56 | 56 | # CreateFile |
|
57 | 57 | _FILE_SHARE_READ = 0x00000001 |
|
58 | 58 | _FILE_SHARE_WRITE = 0x00000002 |
|
59 | 59 | _FILE_SHARE_DELETE = 0x00000004 |
|
60 | 60 | |
|
61 | 61 | _OPEN_EXISTING = 3 |
|
62 | 62 | |
|
63 | 63 | # SetFileAttributes |
|
64 | 64 | _FILE_ATTRIBUTE_NORMAL = 0x80 |
|
65 | 65 | _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000 |
|
66 | 66 | |
|
67 | 67 | # Process Security and Access Rights |
|
68 | 68 | _PROCESS_QUERY_INFORMATION = 0x0400 |
|
69 | 69 | |
|
70 | 70 | # GetExitCodeProcess |
|
71 | 71 | _STILL_ACTIVE = 259 |
|
72 | 72 | |
|
73 | 73 | # registry |
|
74 | 74 | _HKEY_CURRENT_USER = 0x80000001L |
|
75 | 75 | _HKEY_LOCAL_MACHINE = 0x80000002L |
|
76 | 76 | _KEY_READ = 0x20019 |
|
77 | 77 | _REG_SZ = 1 |
|
78 | 78 | _REG_DWORD = 4 |
|
79 | 79 | |
|
80 | 80 | class _STARTUPINFO(ctypes.Structure): |
|
81 | 81 | _fields_ = [('cb', _DWORD), |
|
82 | 82 | ('lpReserved', _LPSTR), |
|
83 | 83 | ('lpDesktop', _LPSTR), |
|
84 | 84 | ('lpTitle', _LPSTR), |
|
85 | 85 | ('dwX', _DWORD), |
|
86 | 86 | ('dwY', _DWORD), |
|
87 | 87 | ('dwXSize', _DWORD), |
|
88 | 88 | ('dwYSize', _DWORD), |
|
89 | 89 | ('dwXCountChars', _DWORD), |
|
90 | 90 | ('dwYCountChars', _DWORD), |
|
91 | 91 | ('dwFillAttribute', _DWORD), |
|
92 | 92 | ('dwFlags', _DWORD), |
|
93 | 93 | ('wShowWindow', _WORD), |
|
94 | 94 | ('cbReserved2', _WORD), |
|
95 | 95 | ('lpReserved2', ctypes.c_char_p), |
|
96 | 96 | ('hStdInput', _HANDLE), |
|
97 | 97 | ('hStdOutput', _HANDLE), |
|
98 | 98 | ('hStdError', _HANDLE)] |
|
99 | 99 | |
|
100 | 100 | class _PROCESS_INFORMATION(ctypes.Structure): |
|
101 | 101 | _fields_ = [('hProcess', _HANDLE), |
|
102 | 102 | ('hThread', _HANDLE), |
|
103 | 103 | ('dwProcessId', _DWORD), |
|
104 | 104 | ('dwThreadId', _DWORD)] |
|
105 | 105 | |
|
106 | 106 | _DETACHED_PROCESS = 0x00000008 |
|
107 | 107 | _STARTF_USESHOWWINDOW = 0x00000001 |
|
108 | 108 | _SW_HIDE = 0 |
|
109 | 109 | |
|
110 | 110 | class _COORD(ctypes.Structure): |
|
111 | 111 | _fields_ = [('X', ctypes.c_short), |
|
112 | 112 | ('Y', ctypes.c_short)] |
|
113 | 113 | |
|
114 | 114 | class _SMALL_RECT(ctypes.Structure): |
|
115 | 115 | _fields_ = [('Left', ctypes.c_short), |
|
116 | 116 | ('Top', ctypes.c_short), |
|
117 | 117 | ('Right', ctypes.c_short), |
|
118 | 118 | ('Bottom', ctypes.c_short)] |
|
119 | 119 | |
|
120 | 120 | class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): |
|
121 | 121 | _fields_ = [('dwSize', _COORD), |
|
122 | 122 | ('dwCursorPosition', _COORD), |
|
123 | 123 | ('wAttributes', _WORD), |
|
124 | 124 | ('srWindow', _SMALL_RECT), |
|
125 | 125 | ('dwMaximumWindowSize', _COORD)] |
|
126 | 126 | |
|
127 | 127 | _STD_ERROR_HANDLE = _DWORD(-12).value |
|
128 | 128 | |
|
129 | 129 | # types of parameters of C functions used (required by pypy) |
|
130 | 130 | |
|
131 | 131 | _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p, |
|
132 | 132 | _DWORD, _DWORD, _HANDLE] |
|
133 | 133 | _kernel32.CreateFileA.restype = _HANDLE |
|
134 | 134 | |
|
135 | 135 | _kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p] |
|
136 | 136 | _kernel32.GetFileInformationByHandle.restype = _BOOL |
|
137 | 137 | |
|
138 | 138 | _kernel32.CloseHandle.argtypes = [_HANDLE] |
|
139 | 139 | _kernel32.CloseHandle.restype = _BOOL |
|
140 | 140 | |
|
141 | 141 | try: |
|
142 | 142 | _kernel32.CreateHardLinkA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p] |
|
143 | 143 | _kernel32.CreateHardLinkA.restype = _BOOL |
|
144 | 144 | except AttributeError: |
|
145 | 145 | pass |
|
146 | 146 | |
|
147 | 147 | _kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD] |
|
148 | 148 | _kernel32.SetFileAttributesA.restype = _BOOL |
|
149 | 149 | |
|
150 | 150 | _kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD] |
|
151 | 151 | _kernel32.OpenProcess.restype = _HANDLE |
|
152 | 152 | |
|
153 | 153 | _kernel32.GetExitCodeProcess.argtypes = [_HANDLE, ctypes.c_void_p] |
|
154 | 154 | _kernel32.GetExitCodeProcess.restype = _BOOL |
|
155 | 155 | |
|
156 | 156 | _kernel32.GetLastError.argtypes = [] |
|
157 | 157 | _kernel32.GetLastError.restype = _DWORD |
|
158 | 158 | |
|
159 | 159 | _kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD] |
|
160 | 160 | _kernel32.GetModuleFileNameA.restype = _DWORD |
|
161 | 161 | |
|
162 | 162 | _kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p, |
|
163 | 163 | ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p, |
|
164 | 164 | ctypes.c_void_p] |
|
165 | 165 | _kernel32.CreateProcessA.restype = _BOOL |
|
166 | 166 | |
|
167 | 167 | _kernel32.ExitProcess.argtypes = [_UINT] |
|
168 | 168 | _kernel32.ExitProcess.restype = None |
|
169 | 169 | |
|
170 | 170 | _kernel32.GetCurrentProcessId.argtypes = [] |
|
171 | 171 | _kernel32.GetCurrentProcessId.restype = _DWORD |
|
172 | 172 | |
|
173 | 173 | _SIGNAL_HANDLER = ctypes.WINFUNCTYPE(_BOOL, _DWORD) |
|
174 | 174 | _kernel32.SetConsoleCtrlHandler.argtypes = [_SIGNAL_HANDLER, _BOOL] |
|
175 | 175 | _kernel32.SetConsoleCtrlHandler.restype = _BOOL |
|
176 | 176 | |
|
177 | 177 | _kernel32.GetStdHandle.argtypes = [_DWORD] |
|
178 | 178 | _kernel32.GetStdHandle.restype = _HANDLE |
|
179 | 179 | |
|
180 | 180 | _kernel32.GetConsoleScreenBufferInfo.argtypes = [_HANDLE, ctypes.c_void_p] |
|
181 | 181 | _kernel32.GetConsoleScreenBufferInfo.restype = _BOOL |
|
182 | 182 | |
|
183 | 183 | _advapi32.RegOpenKeyExA.argtypes = [_HANDLE, _LPCSTR, _DWORD, _DWORD, |
|
184 | 184 | ctypes.c_void_p] |
|
185 | 185 | _advapi32.RegOpenKeyExA.restype = _LONG |
|
186 | 186 | |
|
187 | 187 | _advapi32.RegQueryValueExA.argtypes = [_HANDLE, _LPCSTR, ctypes.c_void_p, |
|
188 | 188 | ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] |
|
189 | 189 | _advapi32.RegQueryValueExA.restype = _LONG |
|
190 | 190 | |
|
191 | 191 | _advapi32.RegCloseKey.argtypes = [_HANDLE] |
|
192 | 192 | _advapi32.RegCloseKey.restype = _LONG |
|
193 | 193 | |
|
194 | 194 | _advapi32.GetUserNameA.argtypes = [ctypes.c_void_p, ctypes.c_void_p] |
|
195 | 195 | _advapi32.GetUserNameA.restype = _BOOL |
|
196 | 196 | |
|
197 | 197 | _user32.GetWindowThreadProcessId.argtypes = [_HANDLE, ctypes.c_void_p] |
|
198 | 198 | _user32.GetWindowThreadProcessId.restype = _DWORD |
|
199 | 199 | |
|
200 | 200 | _user32.ShowWindow.argtypes = [_HANDLE, ctypes.c_int] |
|
201 | 201 | _user32.ShowWindow.restype = _BOOL |
|
202 | 202 | |
|
203 | 203 | _WNDENUMPROC = ctypes.WINFUNCTYPE(_BOOL, _HWND, _LPARAM) |
|
204 | 204 | _user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM] |
|
205 | 205 | _user32.EnumWindows.restype = _BOOL |
|
206 | 206 | |
|
207 | 207 | def _raiseoserror(name): |
|
208 | 208 | err = ctypes.WinError() |
|
209 | 209 | raise OSError(err.errno, '%s: %s' % (name, err.strerror)) |
|
210 | 210 | |
|
211 | 211 | def _getfileinfo(name): |
|
212 | 212 | fh = _kernel32.CreateFileA(name, 0, |
|
213 | 213 | _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE, |
|
214 | 214 | None, _OPEN_EXISTING, 0, None) |
|
215 | 215 | if fh == _INVALID_HANDLE_VALUE: |
|
216 | 216 | _raiseoserror(name) |
|
217 | 217 | try: |
|
218 | 218 | fi = _BY_HANDLE_FILE_INFORMATION() |
|
219 | 219 | if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)): |
|
220 | 220 | _raiseoserror(name) |
|
221 | 221 | return fi |
|
222 | 222 | finally: |
|
223 | 223 | _kernel32.CloseHandle(fh) |
|
224 | 224 | |
|
225 | 225 | def oslink(src, dst): |
|
226 | 226 | try: |
|
227 | 227 | if not _kernel32.CreateHardLinkA(dst, src, None): |
|
228 | 228 | _raiseoserror(src) |
|
229 | 229 | except AttributeError: # Wine doesn't support this function |
|
230 | 230 | _raiseoserror(src) |
|
231 | 231 | |
|
232 | 232 | def nlinks(name): |
|
233 | 233 | '''return number of hardlinks for the given file''' |
|
234 | 234 | return _getfileinfo(name).nNumberOfLinks |
|
235 | 235 | |
|
236 | 236 | def samefile(fpath1, fpath2): |
|
237 | 237 | '''Returns whether fpath1 and fpath2 refer to the same file. This is only |
|
238 | 238 | guaranteed to work for files, not directories.''' |
|
239 | 239 | res1 = _getfileinfo(fpath1) |
|
240 | 240 | res2 = _getfileinfo(fpath2) |
|
241 | 241 | return (res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber |
|
242 | 242 | and res1.nFileIndexHigh == res2.nFileIndexHigh |
|
243 | 243 | and res1.nFileIndexLow == res2.nFileIndexLow) |
|
244 | 244 | |
|
245 | 245 | def samedevice(fpath1, fpath2): |
|
246 | 246 | '''Returns whether fpath1 and fpath2 are on the same device. This is only |
|
247 | 247 | guaranteed to work for files, not directories.''' |
|
248 | 248 | res1 = _getfileinfo(fpath1) |
|
249 | 249 | res2 = _getfileinfo(fpath2) |
|
250 | 250 | return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber |
|
251 | 251 | |
|
252 | 252 | def testpid(pid): |
|
253 | 253 | '''return True if pid is still running, or if we are unable to

254 | 254 | determine this; False otherwise'''
|
255 | 255 | h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid) |
|
256 | 256 | if h: |
|
257 | 257 | try: |
|
258 | 258 | status = _DWORD() |
|
259 | 259 | if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)): |
|
260 | 260 | return status.value == _STILL_ACTIVE |
|
261 | 261 | finally: |
|
262 | 262 | _kernel32.CloseHandle(h) |
|
263 | 263 | return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER |
|
264 | 264 | |
|
265 | 265 | def lookupreg(key, valname=None, scope=None): |
|
266 | 266 | ''' Look up a key/value name in the Windows registry. |
|
267 | 267 | |
|
268 | 268 | valname: value name. If unspecified, the default value for the key |
|
269 | 269 | is used. |
|
270 | 270 | scope: optionally specify scope for registry lookup, this can be |
|
271 | 271 | a sequence of scopes to look up in order. Default (CURRENT_USER, |
|
272 | 272 | LOCAL_MACHINE). |
|
273 | 273 | ''' |
|
274 | 274 | byref = ctypes.byref |
|
275 | 275 | if scope is None: |
|
276 | 276 | scope = (_HKEY_CURRENT_USER, _HKEY_LOCAL_MACHINE) |
|
277 | 277 | elif not isinstance(scope, (list, tuple)): |
|
278 | 278 | scope = (scope,) |
|
279 | 279 | for s in scope: |
|
280 | 280 | kh = _HANDLE() |
|
281 | 281 | res = _advapi32.RegOpenKeyExA(s, key, 0, _KEY_READ, ctypes.byref(kh)) |
|
282 | 282 | if res != _ERROR_SUCCESS: |
|
283 | 283 | continue |
|
284 | 284 | try: |
|
285 | 285 | size = _DWORD(600) |
|
286 | 286 | type = _DWORD() |
|
287 | 287 | buf = ctypes.create_string_buffer(size.value + 1) |
|
288 | 288 | res = _advapi32.RegQueryValueExA(kh.value, valname, None, |
|
289 | 289 | byref(type), buf, byref(size)) |
|
290 | 290 | if res != _ERROR_SUCCESS: |
|
291 | 291 | continue |
|
292 | 292 | if type.value == _REG_SZ: |
|
293 | 293 | # never let a Unicode string escape into the wild |
|
294 | 294 | return encoding.tolocal(buf.value.encode('UTF-8')) |
|
295 | 295 | elif type.value == _REG_DWORD: |
|
296 | 296 | fmt = '<L' |
|
297 | 297 | s = ctypes.string_at(byref(buf), struct.calcsize(fmt)) |
|
298 | 298 | return struct.unpack(fmt, s)[0] |
|
299 | 299 | finally: |
|
300 | 300 | _advapi32.RegCloseKey(kh.value) |
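
An illustrative call; the key path here is a made-up example, not one Mercurial necessarily queries:

installdir = lookupreg(r'Software\ExampleVendor\ExampleApp', 'InstallDir')
# REG_SZ values come back re-encoded to the local encoding, REG_DWORD
# values as plain integers; a miss in both HKEY_CURRENT_USER and
# HKEY_LOCAL_MACHINE falls through the loop and returns None.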
|
301 | 301 | |
|
302 | 302 | def executablepath(): |
|
303 | 303 | '''return full path of hg.exe''' |
|
304 | 304 | size = 600 |
|
305 | 305 | buf = ctypes.create_string_buffer(size + 1) |
|
306 | 306 | len = _kernel32.GetModuleFileNameA(None, ctypes.byref(buf), size) |
|
307 | 307 | if len == 0: |
|
308 | 308 | raise ctypes.WinError
|
309 | 309 | elif len == size: |
|
310 | 310 | raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER) |
|
311 | 311 | return buf.value |
|
312 | 312 | |
|
313 | 313 | def getuser(): |
|
314 | 314 | '''return name of current user''' |
|
315 | 315 | size = _DWORD(300) |
|
316 | 316 | buf = ctypes.create_string_buffer(size.value + 1) |
|
317 | 317 | if not _advapi32.GetUserNameA(ctypes.byref(buf), ctypes.byref(size)): |
|
318 | 318 | raise ctypes.WinError
|
319 | 319 | return buf.value |
|
320 | 320 | |
|
321 | 321 | _signalhandler = [] |
|
322 | 322 | |
|
323 | 323 | def setsignalhandler(): |
|
324 | 324 | '''Register a termination handler for console events including |
|
325 | 325 | CTRL+C. python signal handlers do not work well with socket |
|
326 | 326 | operations. |
|
327 | 327 | ''' |
|
328 | 328 | def handler(event): |
|
329 | 329 | _kernel32.ExitProcess(1) |
|
330 | 330 | |
|
331 | 331 | if _signalhandler: |
|
332 | 332 | return # already registered |
|
333 | 333 | h = _SIGNAL_HANDLER(handler) |
|
334 | 334 | _signalhandler.append(h) # needed to prevent garbage collection |
|
335 | 335 | if not _kernel32.SetConsoleCtrlHandler(h, True): |
|
336 | 336 | raise ctypes.WinError
|
337 | 337 | |
|
338 | 338 | def hidewindow(): |
|
339 | 339 | |
|
340 | 340 | def callback(hwnd, pid): |
|
341 | 341 | wpid = _DWORD() |
|
342 | 342 | _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid)) |
|
343 | 343 | if pid == wpid.value: |
|
344 | 344 | _user32.ShowWindow(hwnd, _SW_HIDE) |
|
345 | 345 | return False # stop enumerating windows |
|
346 | 346 | return True |
|
347 | 347 | |
|
348 | 348 | pid = _kernel32.GetCurrentProcessId() |
|
349 | 349 | _user32.EnumWindows(_WNDENUMPROC(callback), pid) |
|
350 | 350 | |
|
351 | 351 | def termwidth(): |
|
352 | 352 | # cmd.exe does not handle CR like a unix console; the CR is

353 | 353 | # counted in the line length. On 80-column consoles, if 80
|
354 | 354 | # characters are written, the following CR won't apply on the |
|
355 | 355 | # current line but on the new one. Keep room for it. |
|
356 | 356 | width = 79 |
|
357 | 357 | # Query stderr to avoid problems with redirections |
|
358 | 358 | screenbuf = _kernel32.GetStdHandle( |
|
359 | 359 | _STD_ERROR_HANDLE) # don't close the handle returned |
|
360 | 360 | if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE: |
|
361 | 361 | return width |
|
362 | 362 | csbi = _CONSOLE_SCREEN_BUFFER_INFO() |
|
363 | 363 | if not _kernel32.GetConsoleScreenBufferInfo( |
|
364 | 364 | screenbuf, ctypes.byref(csbi)): |
|
365 | 365 | return width |
|
366 | 366 | width = csbi.srWindow.Right - csbi.srWindow.Left |
|
367 | 367 | return width |
|
368 | 368 | |
|
369 | 369 | def spawndetached(args): |
|
370 | 370 | # No standard library function really spawns a fully detached |
|
371 | 371 | # process under win32 because they allocate pipes or other objects |
|
372 | 372 | # to handle standard streams communications. Passing these objects |
|
373 | 373 | # to the child process requires handle inheritance to be enabled |
|
374 | 374 | # which makes really detached processes impossible. |
|
375 | 375 | si = _STARTUPINFO() |
|
376 | 376 | si.cb = ctypes.sizeof(_STARTUPINFO) |
|
377 | 377 | si.dwFlags = _STARTF_USESHOWWINDOW |
|
378 | 378 | si.wShowWindow = _SW_HIDE |
|
379 | 379 | |
|
380 | 380 | pi = _PROCESS_INFORMATION() |
|
381 | 381 | |
|
382 | 382 | env = '' |
|
383 | 383 | for k in os.environ: |
|
384 | 384 | env += "%s=%s\0" % (k, os.environ[k]) |
|
385 | 385 | if not env: |
|
386 | 386 | env = '\0' |
|
387 | 387 | env += '\0' |
|
388 | 388 | |
|
389 | 389 | args = subprocess.list2cmdline(args) |
|
390 | 390 | # Not running the command in shell mode makes python26 hang when |
|
391 | 391 | # writing to hgweb output socket. |
|
392 | 392 | comspec = os.environ.get("COMSPEC", "cmd.exe") |
|
393 | 393 | args = comspec + " /c " + args |
|
394 | 394 | |
|
395 | 395 | res = _kernel32.CreateProcessA( |
|
396 | 396 | None, args, None, None, False, _DETACHED_PROCESS, |
|
397 | 397 | env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi)) |
|
398 | 398 | if not res: |
|
399 | 399 | raise ctypes.WinError
|
400 | 400 | |
|
401 | 401 | return pi.dwProcessId |
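
For reference, the environment block assembled in the loop above is the raw CreateProcessA format: NUL-terminated key=value strings with a second NUL closing the block (the values below are hypothetical):

# 'COMSPEC=C:\\Windows\\system32\\cmd.exe\x00TEMP=C:\\Temp\x00\x00'
# An empty environment still needs a lone '\x00' before the final
# terminator, which is what the "if not env" branch guarantees.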
|
402 | 402 | |
|
403 | 403 | def unlink(f): |
|
404 | 404 | '''try to implement POSIX' unlink semantics on Windows''' |
|
405 | 405 | |
|
406 | 406 | # POSIX allows unlinking and renaming open files. Windows has serious
|
407 | 407 | # problems with doing that: |
|
408 | 408 | # - Calling os.unlink (or os.rename) on a file f fails if f or any |
|
409 | 409 | # hardlinked copy of f has been opened with Python's open(). There is no |
|
410 | 410 | # way such a file can be deleted or renamed on Windows (other than |
|
411 | 411 | # scheduling the delete or rename for the next reboot). |
|
412 | 412 | # - Calling os.unlink on a file that has been opened with Mercurial's |
|
413 | 413 | # posixfile (or comparable methods) will delay the actual deletion of |
|
414 | 414 | # the file for as long as the file is held open. The filename is blocked |
|
415 | 415 | # during that time and cannot be used for recreating a new file under |
|
416 | 416 | # that same name ("zombie file"). Directories containing such zombie files |
|
417 | 417 | # cannot be removed or moved. |
|
418 | 418 | # A file that has been opened with posixfile can be renamed, so we rename |
|
419 | 419 | # f to a random temporary name before calling os.unlink on it. This allows |
|
420 | 420 | # callers to recreate f immediately while having other readers do their |
|
421 | 421 | # implicit zombie filename blocking on a temporary name. |
|
422 | 422 | |
|
423 | 423 | for tries in xrange(10): |
|
424 | 424 | temp = '%s-%08x' % (f, random.randint(0, 0xffffffff)) |
|
425 | 425 | try: |
|
426 | 426 | os.rename(f, temp) # raises OSError EEXIST if temp exists |
|
427 | 427 | break |
|
428 | 428 | except OSError, e: |
|
429 | 429 | if e.errno != errno.EEXIST: |
|
430 | 430 | raise |
|
431 | 431 | else: |
|
432 | 432 | raise IOError, (errno.EEXIST, "No usable temporary filename found") |
|
433 | 433 | |
|
434 | 434 | try: |
|
435 | 435 | os.unlink(temp) |
|
436 | 436 | except OSError: |
|
437 | 437 | # The unlink might have failed because the READONLY attribute may have
|
438 | 438 | # been set on the original file. Rename works fine with READONLY set, |
|
439 | 439 | # but not os.unlink. Reset all attributes and try again. |
|
440 | 440 | _kernel32.SetFileAttributesA(temp, _FILE_ATTRIBUTE_NORMAL) |
|
441 | 441 | try: |
|
442 | 442 | os.unlink(temp) |
|
443 | 443 | except OSError: |
|
444 | 444 | # The unlink might have failed due to some very rude AV-Scanners. |
|
445 | 445 | # Leaking a tempfile is the lesser evil than aborting here and |
|
446 | 446 | # leaving some potentially serious inconsistencies. |
|
447 | 447 | pass |
|
448 | 448 | |
|
449 | 449 | def makedir(path, notindexed): |
|
450 | 450 | os.mkdir(path) |
|
451 | 451 | if notindexed: |
|
452 | 452 | _kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED) |
@@ -1,319 +1,319 @@
|
1 | 1 | # windows.py - Windows utility function implementations for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | import osutil |
|
10 | 10 | import errno, msvcrt, os, re, sys |
|
11 | 11 | |
|
12 | 12 | import win32 |
|
13 | 13 | executablepath = win32.executablepath |
|
14 | 14 | getuser = win32.getuser |
|
15 | 15 | hidewindow = win32.hidewindow |
|
16 | 16 | lookupreg = win32.lookupreg |
|
17 | 17 | makedir = win32.makedir |
|
18 | 18 | nlinks = win32.nlinks |
|
19 | 19 | oslink = win32.oslink |
|
20 | 20 | samedevice = win32.samedevice |
|
21 | 21 | samefile = win32.samefile |
|
22 | 22 | setsignalhandler = win32.setsignalhandler |
|
23 | 23 | spawndetached = win32.spawndetached |
|
24 | 24 | termwidth = win32.termwidth |
|
25 | 25 | testpid = win32.testpid |
|
26 | 26 | unlink = win32.unlink |
|
27 | 27 | |
|
28 | 28 | nulldev = 'NUL:' |
|
29 | 29 | umask = 0022 |
|
30 | 30 | |
|
31 | 31 | # wrap osutil.posixfile to provide friendlier exceptions |
|
32 | 32 | def posixfile(name, mode='r', buffering=-1): |
|
33 | 33 | try: |
|
34 | 34 | return osutil.posixfile(name, mode, buffering) |
|
35 | 35 | except WindowsError, err: |
|
36 | 36 | raise IOError(err.errno, '%s: %s' % (name, err.strerror)) |
|
37 | 37 | posixfile.__doc__ = osutil.posixfile.__doc__ |
|
38 | 38 | |
|
39 | 39 | class winstdout(object): |
|
40 | 40 | '''stdout on windows misbehaves if sent through a pipe''' |
|
41 | 41 | |
|
42 | 42 | def __init__(self, fp): |
|
43 | 43 | self.fp = fp |
|
44 | 44 | |
|
45 | 45 | def __getattr__(self, key): |
|
46 | 46 | return getattr(self.fp, key) |
|
47 | 47 | |
|
48 | 48 | def close(self): |
|
49 | 49 | try: |
|
50 | 50 | self.fp.close() |
|
51 | 51 | except IOError: |
|
52 | 52 | pass |
|
53 | 53 | |
|
54 | 54 | def write(self, s): |
|
55 | 55 | try: |
|
56 | 56 | # This is a workaround for the "Not enough space" error on

57 | 57 | # writing a large amount of data to the console.
|
58 | 58 | limit = 16000 |
|
59 | 59 | l = len(s) |
|
60 | 60 | start = 0 |
|
61 | 61 | self.softspace = 0 |
|
62 | 62 | while start < l: |
|
63 | 63 | end = start + limit |
|
64 | 64 | self.fp.write(s[start:end]) |
|
65 | 65 | start = end |
|
66 | 66 | except IOError, inst: |
|
67 | 67 | if inst.errno != 0: |
|
68 | 68 | raise |
|
69 | 69 | self.close() |
|
70 | 70 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
71 | 71 | |
|
72 | 72 | def flush(self): |
|
73 | 73 | try: |
|
74 | 74 | return self.fp.flush() |
|
75 | 75 | except IOError, inst: |
|
76 | 76 | if inst.errno != errno.EINVAL: |
|
77 | 77 | raise |
|
78 | 78 | self.close() |
|
79 | 79 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
80 | 80 | |
|
81 | 81 | sys.__stdout__ = sys.stdout = winstdout(sys.stdout) |
|
82 | 82 | |
|
83 | 83 | def _is_win_9x(): |
|
84 | 84 | '''return true if run on windows 95, 98 or me.''' |
|
85 | 85 | try: |
|
86 | 86 | return sys.getwindowsversion()[3] == 1 |
|
87 | 87 | except AttributeError: |
|
88 | 88 | return 'command' in os.environ.get('comspec', '') |
|
89 | 89 | |
|
90 | 90 | def openhardlinks(): |
|
91 | 91 | return not _is_win_9x() |
|
92 | 92 | |
|
93 | 93 | def parsepatchoutput(output_line): |
|
94 | 94 | """parses the output produced by patch and returns the filename""" |
|
95 | 95 | pf = output_line[14:] |
|
96 | 96 | if pf[0] == '`': |
|
97 | 97 | pf = pf[1:-1] # Remove the quotes |
|
98 | 98 | return pf |
|
99 | 99 | |
|
100 | 100 | def sshargs(sshcmd, host, user, port): |
|
101 | 101 | '''Build argument list for ssh or Plink''' |
|
102 | 102 | pflag = 'plink' in sshcmd.lower() and '-P' or '-p' |
|
103 | 103 | args = user and ("%s@%s" % (user, host)) or host |
|
104 | 104 | return port and ("%s %s %s" % (args, pflag, port)) or args |
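
Two worked examples (hostnames hypothetical):

# sshargs('ssh', 'example.com', 'hg', '2222')
#     -> 'hg@example.com -p 2222'
# sshargs('plink.exe', 'example.com', None, '2222')
#     -> 'example.com -P 2222'    (Plink spells the port flag -P)
# sshargs('ssh', 'example.com', None, None)
#     -> 'example.com'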
|
105 | 105 | |
|
106 | 106 | def setflags(f, l, x): |
|
107 | 107 | pass |
|
108 | 108 | |
|
109 | 109 | def copymode(src, dst, mode=None): |
|
110 | 110 | pass |
|
111 | 111 | |
|
112 | 112 | def checkexec(path): |
|
113 | 113 | return False |
|
114 | 114 | |
|
115 | 115 | def checklink(path): |
|
116 | 116 | return False |
|
117 | 117 | |
|
118 | 118 | def setbinary(fd): |
|
119 | 119 | # When run without console, pipes may expose invalid |
|
120 | 120 | # fileno(), usually set to -1. |
|
121 | 121 | fno = getattr(fd, 'fileno', None) |
|
122 | 122 | if fno is not None and fno() >= 0: |
|
123 | 123 | msvcrt.setmode(fno(), os.O_BINARY) |
|
124 | 124 | |
|
125 | 125 | def pconvert(path): |
|
126 | 126 | return path.replace(os.sep, '/') |
|
127 | 127 | |
|
128 | 128 | def localpath(path): |
|
129 | 129 | return path.replace('/', '\\') |
|
130 | 130 | |
|
131 | 131 | def normpath(path): |
|
132 | 132 | return pconvert(os.path.normpath(path)) |
|
133 | 133 | |
|
134 | 134 | encodinglower = None |
|
135 | 135 | encodingupper = None |
|
136 | 136 | |
|
137 | 137 | def normcase(path): |
|
138 | 138 | return encodingupper(path) |
|
139 | 139 | |
|
140 | 140 | def realpath(path): |
|
141 | 141 | ''' |
|
142 | 142 | Returns the true, canonical file system path equivalent to the given |
|
143 | 143 | path. |
|
144 | 144 | ''' |
|
145 | 145 | # TODO: There may be a more clever way to do this that also handles other, |
|
146 | 146 | # less common file systems. |
|
147 | 147 | return os.path.normpath(normcase(os.path.realpath(path))) |
|
148 | 148 | |
|
149 | 149 | def samestat(s1, s2): |
|
150 | 150 | return False |
|
151 | 151 | |
|
152 | 152 | # A sequence of backslashes is special iff it precedes a double quote: |
|
153 | 153 | # - if there's an even number of backslashes, the double quote is not |
|
154 | 154 | # quoted (i.e. it ends the quoted region) |
|
155 | 155 | # - if there's an odd number of backslashes, the double quote is quoted |
|
156 | 156 | # - in both cases, every pair of backslashes is unquoted into a single |
|
157 | 157 | # backslash |
|
158 | 158 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) |
|
159 | 159 | # So, to quote a string, we must surround it in double quotes, double |
|
160 | 160 | # the number of backslashes that precede double quotes and add another
|
161 | 161 | # backslash before every double quote (being careful with the double |
|
162 | 162 | # quote we've appended to the end) |
|
163 | 163 | _quotere = None |
|
164 | 164 | def shellquote(s): |
|
165 | 165 | global _quotere |
|
166 | 166 | if _quotere is None: |
|
167 | 167 | _quotere = re.compile(r'(\\*)("|\\$)') |
|
168 | 168 | return '"%s"' % _quotere.sub(r'\1\1\\\2', s) |
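
Worked examples of those rules, shown as the literal strings produced rather than as Python source:

#   foo        ->  "foo"
#   say "hi"   ->  "say \"hi\""      (quote escaped)
#   x\"y       ->  "x\\\"y"          (backslash doubled, quote escaped)
#   end\       ->  "end\\"           (trailing backslash doubled so it
#                                     cannot escape the closing quote)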
|
169 | 169 | |
|
170 | 170 | def quotecommand(cmd): |
|
171 | 171 | """Build a command string suitable for os.popen* calls.""" |
|
172 | 172 | if sys.version_info < (2, 7, 1): |
|
173 | 173 | # Python versions since 2.7.1 do this extra quoting themselves |
|
174 | 174 | return '"' + cmd + '"' |
|
175 | 175 | return cmd |
|
176 | 176 | |
|
177 | 177 | def popen(command, mode='r'): |
|
178 | 178 | # Work around "popen spawned process may not write to stdout |
|
179 | 179 | # under windows" |
|
180 | 180 | # http://bugs.python.org/issue1366 |
|
181 | 181 | command += " 2> %s" % nulldev |
|
182 | 182 | return os.popen(quotecommand(command), mode) |
|
183 | 183 | |
|
184 | 184 | def explainexit(code): |
|
185 | 185 | return _("exited with status %d") % code, code |
|
186 | 186 | |
|
187 | 187 | # if you change this stub into a real check, please try to implement the |
|
188 | 188 | # username and groupname functions above, too. |
|
189 | 189 | def isowner(st): |
|
190 | 190 | return True |
|
191 | 191 | |
|
192 | 192 | def findexe(command): |
|
193 | 193 | '''Find executable for command searching like cmd.exe does. |
|
194 | 194 | If command is a basename then PATH is searched for command. |
|
195 | 195 | PATH isn't searched if command is an absolute or relative path. |
|
196 | 196 | An extension from PATHEXT is found and added if not present. |
|
197 | 197 | If command isn't found None is returned.''' |
|
198 | 198 | pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD') |
|
199 | 199 | pathexts = [ext for ext in pathext.lower().split(os.pathsep)] |
|
200 | 200 | if os.path.splitext(command)[1].lower() in pathexts: |
|
201 | 201 | pathexts = [''] |
|
202 | 202 | |
|
203 | 203 | def findexisting(pathcommand): |
|
204 | 204 | 'Will append extension (if needed) and return existing file' |
|
205 | 205 | for ext in pathexts: |
|
206 | 206 | executable = pathcommand + ext |
|
207 | 207 | if os.path.exists(executable): |
|
208 | 208 | return executable |
|
209 | 209 | return None |
|
210 | 210 | |
|
211 | 211 | if os.sep in command: |
|
212 | 212 | return findexisting(command) |
|
213 | 213 | |
|
214 | 214 | for path in os.environ.get('PATH', '').split(os.pathsep): |
|
215 | 215 | executable = findexisting(os.path.join(path, command)) |
|
216 | 216 | if executable is not None: |
|
217 | 217 | return executable |
|
218 | 218 | return findexisting(os.path.expanduser(os.path.expandvars(command))) |
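
Illustrative lookups; actual results depend on the local PATH and PATHEXT, and the paths here are hypothetical:

# findexe('python')            might return r'C:\Python27\python.exe'
# findexe(r'C:\tools\x.bat')   bypasses PATH (contains os.sep) and
#                              returns the path itself if it exists
# findexe('nosuchtool')        returns None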
|
219 | 219 | |
|
220 | 220 | def statfiles(files): |
|
221 | 221 | '''Stat each file in files and yield stat or None if file does not exist. |
|
222 | 222 | Cluster and cache stat per directory to minimize number of OS stat calls.''' |
|
223 | 223 | dircache = {} # dirname -> filename -> status | None if file does not exist |
|
224 | 224 | for nf in files: |
|
225 | 225 | nf = normcase(nf) |
|
226 | 226 | dir, base = os.path.split(nf) |
|
227 | 227 | if not dir: |
|
228 | 228 | dir = '.' |
|
229 | 229 | cache = dircache.get(dir, None) |
|
230 | 230 | if cache is None: |
|
231 | 231 | try: |
|
232 | 232 | dmap = dict([(normcase(n), s) |
|
233 | 233 | for n, k, s in osutil.listdir(dir, True)]) |
|
234 | 234 | except OSError, err: |
|
235 | 235 | # handle directory not found in Python version prior to 2.5 |
|
236 | 236 | # Python <= 2.4 returns native Windows code 3 in errno |
|
237 | 237 | # Python >= 2.5 returns ENOENT and adds winerror field |
|
238 | 238 | # EINVAL is raised if dir is not a directory. |
|
239 | 239 | if err.errno not in (3, errno.ENOENT, errno.EINVAL, |
|
240 | 240 | errno.ENOTDIR): |
|
241 | 241 | raise |
|
242 | 242 | dmap = {} |
|
243 | 243 | cache = dircache.setdefault(dir, dmap) |
|
244 | 244 | yield cache.get(base, None) |
|
245 | 245 | |
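The point of the dircache is that a single osutil.listdir call (which gets stat data from the directory scan itself) replaces one os.stat per file in the same directory. A minimal pure-os sketch of the same clustering idea -- plain os has to lstat each entry, so this only illustrates the caching structure, not the syscall savings, and it omits the normcase normalization:

    import os

    def statfiles_sketch(files):
        dircache = {}  # dirname -> {entry name -> stat result}
        for nf in files:
            dir, base = os.path.split(nf)
            dir = dir or '.'
            cache = dircache.get(dir)
            if cache is None:
                cache = {}
                try:
                    for n in os.listdir(dir):
                        try:
                            cache[n] = os.lstat(os.path.join(dir, n))
                        except OSError:
                            pass  # entry vanished between listdir and lstat
                except OSError:
                    pass  # directory missing, or not a directory
                dircache[dir] = cache
            yield cache.get(base)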
|
246 | 246 | def username(uid=None): |
|
247 | 247 | """Return the name of the user with the given uid. |
|
248 | 248 | |
|
249 | 249 | If uid is None, return the name of the current user.""" |
|
250 | 250 | return None |
|
251 | 251 | |
|
252 | 252 | def groupname(gid=None): |
|
253 | 253 | """Return the name of the group with the given gid. |
|
254 | 254 | |
|
255 | 255 | If gid is None, return the name of the current group.""" |
|
256 | 256 | return None |
|
257 | 257 | |
|
258 | 258 | def _removedirs(name): |
|
259 | 259 | """special version of os.removedirs that does not remove symlinked |
|
260 | 260 | directories or junction points if they actually contain files""" |
|
261 | 261 | if osutil.listdir(name): |
|
262 | 262 | return |
|
263 | 263 | os.rmdir(name) |
|
264 | 264 | head, tail = os.path.split(name) |
|
265 | 265 | if not tail: |
|
266 | 266 | head, tail = os.path.split(head) |
|
267 | 267 | while head and tail: |
|
268 | 268 | try: |
|
269 | 269 | if osutil.listdir(head): |
|
270 | 270 | return |
|
271 | 271 | os.rmdir(head) |
|
272 | 272 | except (ValueError, OSError): |
|
273 | 273 | break |
|
274 | 274 | head, tail = os.path.split(head) |
|
275 | 275 | |
|
276 | 276 | def unlinkpath(f): |
|
277 | 277 | """unlink and remove the directory if it is empty""" |
|
278 | 278 | unlink(f) |
|
279 | 279 | # try removing directories that might now be empty |
|
280 | 280 | try: |
|
281 | 281 | _removedirs(os.path.dirname(f)) |
|
282 | 282 | except OSError: |
|
283 | 283 | pass |
|
284 | 284 | |
|
285 | 285 | def rename(src, dst): |
|
286 | 286 | '''atomically rename file src to dst, replacing dst if it exists''' |
|
287 | 287 | try: |
|
288 | 288 | os.rename(src, dst) |
|
289 | 289 | except OSError, e: |
|
290 | 290 | if e.errno != errno.EEXIST: |
|
291 | 291 | raise |
|
292 | 292 | unlink(dst) |
|
293 | 293 | os.rename(src, dst) |
|
294 | 294 | |
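Background for the except clause: POSIX rename(2) replaces an existing destination atomically, but on Windows os.rename raises OSError with errno EEXIST instead, hence the unlink-and-retry. The retry leaves a brief window with no dst at all, so "atomically" in the docstring is best-effort on this platform. A usage sketch, assuming rename is the helper above and unlink is the platform unlink defined earlier in this module:

    # dst may already exist; rename() still succeeds here.
    open('dst.txt', 'wb').write('old')
    open('src.txt', 'wb').write('new')
    rename('src.txt', 'dst.txt')
    assert open('dst.txt', 'rb').read() == 'new'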
|
295 | 295 | def gethgcmd(): |
|
296 | 296 | return [sys.executable] + sys.argv[:1] |
|
297 | 297 | |
|
298 | 298 | def termwidth(): |
|
299 | 299 | # cmd.exe does not handle CR like a unix console, the CR is |
|
300 | 300 | # counted in the line length. On 80 columns consoles, if 80 |
|
301 | 301 | # characters are written, the following CR won't apply on the |
|
302 | 302 | # current line but on the new one. Keep room for it. |
|
303 | 303 | return 79 |
|
304 | 304 | |
|
305 | 305 | def groupmembers(name): |
|
306 | 306 | # Don't support groups on Windows for now |
|
307 | | raise KeyError()
| 307 | raise KeyError
|
308 | 308 | |
|
309 | 309 | def isexec(f): |
|
310 | 310 | return False |
|
311 | 311 | |
|
312 | 312 | class cachestat(object): |
|
313 | 313 | def __init__(self, path): |
|
314 | 314 | pass |
|
315 | 315 | |
|
316 | 316 | def cacheable(self): |
|
317 | 317 | return False |
|
318 | 318 | |
|
319 | 319 | expandglobs = True |
@@ -1,260 +1,260 @@
|
1 | 1 | import sys, os, struct, subprocess, cStringIO, re, shutil |
|
2 | 2 | |
|
3 | 3 | def connect(path=None): |
|
4 | 4 | cmdline = ['hg', 'serve', '--cmdserver', 'pipe'] |
|
5 | 5 | if path: |
|
6 | 6 | cmdline += ['-R', path] |
|
7 | 7 | |
|
8 | 8 | server = subprocess.Popen(cmdline, stdin=subprocess.PIPE, |
|
9 | 9 | stdout=subprocess.PIPE) |
|
10 | 10 | |
|
11 | 11 | return server |
|
12 | 12 | |
|
13 | 13 | def writeblock(server, data): |
|
14 | 14 | server.stdin.write(struct.pack('>I', len(data))) |
|
15 | 15 | server.stdin.write(data) |
|
16 | 16 | server.stdin.flush() |
|
17 | 17 | |
|
18 | 18 | def readchannel(server): |
|
19 | 19 | data = server.stdout.read(5) |
|
20 | 20 | if not data: |
|
21 | | raise EOFError()
| 21 | raise EOFError
|
22 | 22 | channel, length = struct.unpack('>cI', data) |
|
23 | 23 | if channel in 'IL': |
|
24 | 24 | return channel, length |
|
25 | 25 | else: |
|
26 | 26 | return channel, server.stdout.read(length) |
|
27 | 27 | |
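Each server message is a five-byte header -- one ASCII channel byte plus a big-endian unsigned 32-bit length -- optionally followed by a payload. On the data channels the length counts payload bytes to read; on the input channels 'I' and 'L' it is the maximum amount of input the server wants, and no payload follows. Decoding a sample header by hand:

    import struct

    header = 'o' + struct.pack('>I', 13)            # channel 'o', 13 payload bytes follow
    channel, length = struct.unpack('>cI', header)
    assert (channel, length) == ('o', 13)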
|
28 | 28 | def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None): |
|
29 | 29 | print ' runcommand', ' '.join(args) |
|
30 | 30 | sys.stdout.flush() |
|
31 | 31 | server.stdin.write('runcommand\n') |
|
32 | 32 | writeblock(server, '\0'.join(args)) |
|
33 | 33 | |
|
34 | 34 | if not input: |
|
35 | 35 | input = cStringIO.StringIO() |
|
36 | 36 | |
|
37 | 37 | while True: |
|
38 | 38 | ch, data = readchannel(server) |
|
39 | 39 | if ch == 'o': |
|
40 | 40 | output.write(data) |
|
41 | 41 | output.flush() |
|
42 | 42 | elif ch == 'e': |
|
43 | 43 | error.write(data) |
|
44 | 44 | error.flush() |
|
45 | 45 | elif ch == 'I': |
|
46 | 46 | writeblock(server, input.read(data)) |
|
47 | 47 | elif ch == 'L': |
|
48 | 48 | writeblock(server, input.readline(data)) |
|
49 | 49 | elif ch == 'r': |
|
50 | 50 | return struct.unpack('>i', data)[0] |
|
51 | 51 | else: |
|
52 | 52 | print "unexpected channel %c: %r" % (ch, data) |
|
53 | 53 | if ch.isupper(): |
|
54 | 54 | return |
|
55 | 55 | |
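In other words, runcommand() is a complete client for one request: it writes the command name and a NUL-separated argument block, then services channels until 'r' delivers the big-endian signed 32-bit exit code. A minimal session built from the helpers above, assuming the current directory is a repository:

    server = connect()
    readchannel(server)                     # drain the hello message
    ret = runcommand(server, ['id', '-q'])  # short node printed to stdout
    print 'exit code:', ret                 # 0 on success
    server.stdin.close()
    server.wait()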
|
56 | 56 | def check(func, repopath=None): |
|
57 | 57 | |
|
58 | 58 | print 'testing %s:' % func.__name__ |
|
59 | 59 | |
|
60 | 60 | sys.stdout.flush() |
|
61 | 61 | server = connect(repopath) |
|
62 | 62 | try: |
|
63 | 63 | return func(server) |
|
64 | 64 | finally: |
|
65 | 65 | server.stdin.close() |
|
66 | 66 | server.wait() |
|
67 | 67 | |
|
68 | 68 | def unknowncommand(server): |
|
69 | 69 | server.stdin.write('unknowncommand\n') |
|
70 | 70 | |
|
71 | 71 | def hellomessage(server): |
|
72 | 72 | ch, data = readchannel(server) |
|
73 | 73 | # escaping python tests output not supported |
|
74 | 74 | print '%c, %r' % (ch, re.sub('encoding: [a-zA-Z0-9-]+', 'encoding: ***', |
|
75 | 75 | data)) |
|
76 | 76 | |
|
77 | 77 | # run an arbitrary command to make sure the next thing the server sends |
|
78 | 78 | # isn't part of the hello message |
|
79 | 79 | runcommand(server, ['id']) |
|
80 | 80 | |
|
81 | 81 | def checkruncommand(server): |
|
82 | 82 | # hello block |
|
83 | 83 | readchannel(server) |
|
84 | 84 | |
|
85 | 85 | # no args |
|
86 | 86 | runcommand(server, []) |
|
87 | 87 | |
|
88 | 88 | # global options |
|
89 | 89 | runcommand(server, ['id', '--quiet']) |
|
90 | 90 | |
|
91 | 91 | # make sure global options don't stick through requests |
|
92 | 92 | runcommand(server, ['id']) |
|
93 | 93 | |
|
94 | 94 | # --config |
|
95 | 95 | runcommand(server, ['id', '--config', 'ui.quiet=True']) |
|
96 | 96 | |
|
97 | 97 | # make sure --config doesn't stick |
|
98 | 98 | runcommand(server, ['id']) |
|
99 | 99 | |
|
100 | 100 | def inputeof(server): |
|
101 | 101 | readchannel(server) |
|
102 | 102 | server.stdin.write('runcommand\n') |
|
103 | 103 | # close stdin while server is waiting for input |
|
104 | 104 | server.stdin.close() |
|
105 | 105 | |
|
106 | 106 | # server exits with 1 if the pipe closed while reading the command |
|
107 | 107 | print 'server exit code =', server.wait() |
|
108 | 108 | |
|
109 | 109 | def serverinput(server): |
|
110 | 110 | readchannel(server) |
|
111 | 111 | |
|
112 | 112 | patch = """ |
|
113 | 113 | # HG changeset patch |
|
114 | 114 | # User test |
|
115 | 115 | # Date 0 0 |
|
116 | 116 | # Node ID c103a3dec114d882c98382d684d8af798d09d857 |
|
117 | 117 | # Parent 0000000000000000000000000000000000000000 |
|
118 | 118 | 1 |
|
119 | 119 | |
|
120 | 120 | diff -r 000000000000 -r c103a3dec114 a |
|
121 | 121 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
122 | 122 | +++ b/a Thu Jan 01 00:00:00 1970 +0000 |
|
123 | 123 | @@ -0,0 +1,1 @@ |
|
124 | 124 | +1 |
|
125 | 125 | """ |
|
126 | 126 | |
|
127 | 127 | runcommand(server, ['import', '-'], input=cStringIO.StringIO(patch)) |
|
128 | 128 | runcommand(server, ['log']) |
|
129 | 129 | |
|
130 | 130 | def cwd(server): |
|
131 | 131 | """ check that --cwd doesn't persist between requests """ |
|
132 | 132 | readchannel(server) |
|
133 | 133 | os.mkdir('foo') |
|
134 | 134 | f = open('foo/bar', 'wb') |
|
135 | 135 | f.write('a') |
|
136 | 136 | f.close() |
|
137 | 137 | runcommand(server, ['--cwd', 'foo', 'st', 'bar']) |
|
138 | 138 | runcommand(server, ['st', 'foo/bar']) |
|
139 | 139 | os.remove('foo/bar') |
|
140 | 140 | |
|
141 | 141 | def localhgrc(server): |
|
142 | 142 | """ check that local configs for the cached repo aren't inherited when -R |
|
143 | 143 | is used """ |
|
144 | 144 | readchannel(server) |
|
145 | 145 | |
|
146 | 146 | # the cached repo local hgrc contains ui.foo=bar, so showconfig should |
|
147 | 147 | # show it |
|
148 | 148 | runcommand(server, ['showconfig']) |
|
149 | 149 | |
|
150 | 150 | # but not for this repo |
|
151 | 151 | runcommand(server, ['init', 'foo']) |
|
152 | 152 | runcommand(server, ['-R', 'foo', 'showconfig', 'ui', 'defaults']) |
|
153 | 153 | shutil.rmtree('foo') |
|
154 | 154 | |
|
155 | 155 | def hook(**args): |
|
156 | 156 | print 'hook talking' |
|
157 | 157 | print 'now try to read something: %r' % sys.stdin.read() |
|
158 | 158 | |
|
159 | 159 | def hookoutput(server): |
|
160 | 160 | readchannel(server) |
|
161 | 161 | runcommand(server, ['--config', |
|
162 | 162 | 'hooks.pre-identify=python:test-commandserver.hook', |
|
163 | 163 | 'id'], |
|
164 | 164 | input=cStringIO.StringIO('some input')) |
|
165 | 165 | |
|
166 | 166 | def outsidechanges(server): |
|
167 | 167 | readchannel(server) |
|
168 | 168 | f = open('a', 'ab') |
|
169 | 169 | f.write('a\n') |
|
170 | 170 | f.close() |
|
171 | 171 | runcommand(server, ['status']) |
|
172 | 172 | os.system('hg ci -Am2') |
|
173 | 173 | runcommand(server, ['tip']) |
|
174 | 174 | runcommand(server, ['status']) |
|
175 | 175 | |
|
176 | 176 | def bookmarks(server): |
|
177 | 177 | readchannel(server) |
|
178 | 178 | runcommand(server, ['bookmarks']) |
|
179 | 179 | |
|
180 | 180 | # changes .hg/bookmarks |
|
181 | 181 | os.system('hg bookmark -i bm1') |
|
182 | 182 | os.system('hg bookmark -i bm2') |
|
183 | 183 | runcommand(server, ['bookmarks']) |
|
184 | 184 | |
|
185 | 185 | # changes .hg/bookmarks.current |
|
186 | 186 | os.system('hg upd bm1 -q') |
|
187 | 187 | runcommand(server, ['bookmarks']) |
|
188 | 188 | |
|
189 | 189 | runcommand(server, ['bookmarks', 'bm3']) |
|
190 | 190 | f = open('a', 'ab') |
|
191 | 191 | f.write('a\n') |
|
192 | 192 | f.close() |
|
193 | 193 | runcommand(server, ['commit', '-Amm']) |
|
194 | 194 | runcommand(server, ['bookmarks']) |
|
195 | 195 | |
|
196 | 196 | def tagscache(server): |
|
197 | 197 | readchannel(server) |
|
198 | 198 | runcommand(server, ['id', '-t', '-r', '0']) |
|
199 | 199 | os.system('hg tag -r 0 foo') |
|
200 | 200 | runcommand(server, ['id', '-t', '-r', '0']) |
|
201 | 201 | |
|
202 | 202 | def setphase(server): |
|
203 | 203 | readchannel(server) |
|
204 | 204 | runcommand(server, ['phase', '-r', '.']) |
|
205 | 205 | os.system('hg phase -r . -p') |
|
206 | 206 | runcommand(server, ['phase', '-r', '.']) |
|
207 | 207 | |
|
208 | 208 | def rollback(server): |
|
209 | 209 | readchannel(server) |
|
210 | 210 | runcommand(server, ['phase', '-r', '.', '-p']) |
|
211 | 211 | f = open('a', 'ab') |
|
212 | 212 | f.write('a\n') |
|
213 | 213 | f.close() |
|
214 | 214 | runcommand(server, ['commit', '-Am.']) |
|
215 | 215 | runcommand(server, ['rollback']) |
|
216 | 216 | runcommand(server, ['phase', '-r', '.']) |
|
217 | 217 | |
|
218 | 218 | def branch(server): |
|
219 | 219 | readchannel(server) |
|
220 | 220 | runcommand(server, ['branch']) |
|
221 | 221 | os.system('hg branch foo') |
|
222 | 222 | runcommand(server, ['branch']) |
|
223 | 223 | os.system('hg branch default') |
|
224 | 224 | |
|
225 | 225 | def hgignore(server): |
|
226 | 226 | readchannel(server) |
|
227 | 227 | f = open('.hgignore', 'ab') |
|
228 | 228 | f.write('') |
|
229 | 229 | f.close() |
|
230 | 230 | runcommand(server, ['commit', '-Am.']) |
|
231 | 231 | f = open('ignored-file', 'ab') |
|
232 | 232 | f.write('') |
|
233 | 233 | f.close() |
|
234 | 234 | f = open('.hgignore', 'ab') |
|
235 | 235 | f.write('ignored-file') |
|
236 | 236 | f.close() |
|
237 | 237 | runcommand(server, ['status', '-i', '-u']) |
|
238 | 238 | |
|
239 | 239 | if __name__ == '__main__': |
|
240 | 240 | os.system('hg init') |
|
241 | 241 | |
|
242 | 242 | check(hellomessage) |
|
243 | 243 | check(unknowncommand) |
|
244 | 244 | check(checkruncommand) |
|
245 | 245 | check(inputeof) |
|
246 | 246 | check(serverinput) |
|
247 | 247 | check(cwd) |
|
248 | 248 | |
|
249 | 249 | hgrc = open('.hg/hgrc', 'a') |
|
250 | 250 | hgrc.write('[ui]\nfoo=bar\n') |
|
251 | 251 | hgrc.close() |
|
252 | 252 | check(localhgrc) |
|
253 | 253 | check(hookoutput) |
|
254 | 254 | check(outsidechanges) |
|
255 | 255 | check(bookmarks) |
|
256 | 256 | check(tagscache) |
|
257 | 257 | check(setphase) |
|
258 | 258 | check(rollback) |
|
259 | 259 | check(branch) |
|
260 | 260 | check(hgignore) |