@@ -1,1164 +1,1161 b''
|
1 | 1 | # absorb.py |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """apply working directory changes to changesets (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | The absorb extension provides a command to use annotate information to |
|
11 | 11 | amend modified chunks into the corresponding non-public changesets. |
|
12 | 12 | |
|
13 | 13 | :: |
|
14 | 14 | |
|
15 | 15 | [absorb] |
|
16 | 16 | # only check 50 recent non-public changesets at most |
|
17 | 17 | max-stack-size = 50 |
|
18 | 18 | # whether to add noise to new commits to avoid obsolescence cycle |
|
19 | 19 | add-noise = 1 |
|
20 | 20 | # make `amend --correlated` a shortcut to the main command |
|
21 | 21 | amend-flag = correlated |
|
22 | 22 | |
|
23 | 23 | [color] |
|
24 | 24 | absorb.description = yellow |
|
25 | 25 | absorb.node = blue bold |
|
26 | 26 | absorb.path = bold |
|
27 | 27 | """ |
|
28 | 28 | |
|
29 | 29 | # TODO: |
|
30 | 30 | # * Rename config items to [commands] namespace |
|
31 | 31 | # * Converge getdraftstack() with other code in core |
|
32 | 32 | # * move many attributes on fixupstate to be private |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | import collections |
|
36 | 36 | |
|
37 | 37 | from mercurial.i18n import _ |
|
38 | 38 | from mercurial.node import ( |
|
39 | 39 | hex, |
|
40 | 40 | short, |
|
41 | 41 | ) |
|
42 | 42 | from mercurial import ( |
|
43 | 43 | cmdutil, |
|
44 | 44 | commands, |
|
45 | 45 | context, |
|
46 | 46 | crecord, |
|
47 | 47 | error, |
|
48 | 48 | linelog, |
|
49 | 49 | mdiff, |
|
50 | 50 | obsolete, |
|
51 | 51 | patch, |
|
52 | 52 | phases, |
|
53 | 53 | pycompat, |
|
54 | 54 | registrar, |
|
55 | 55 | rewriteutil, |
|
56 | 56 | scmutil, |
|
57 | 57 | util, |
|
58 | 58 | ) |
|
59 | 59 | from mercurial.utils import stringutil |
|
60 | 60 | |
|
61 | 61 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
62 | 62 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
63 | 63 | # specify the version(s) of Mercurial they are tested with, or |
|
64 | 64 | # leave the attribute unspecified. |
|
65 | 65 | testedwith = b'ships-with-hg-core' |
|
66 | 66 | |
|
67 | 67 | cmdtable = {} |
|
68 | 68 | command = registrar.command(cmdtable) |
|
69 | 69 | |
|
70 | 70 | configtable = {} |
|
71 | 71 | configitem = registrar.configitem(configtable) |
|
72 | 72 | |
|
73 | 73 | configitem(b'absorb', b'add-noise', default=True) |
|
74 | 74 | configitem(b'absorb', b'amend-flag', default=None) |
|
75 | 75 | configitem(b'absorb', b'max-stack-size', default=50) |
|
76 | 76 | |
|
77 | 77 | colortable = { |
|
78 | 78 | b'absorb.description': b'yellow', |
|
79 | 79 | b'absorb.node': b'blue bold', |
|
80 | 80 | b'absorb.path': b'bold', |
|
81 | 81 | } |
|
82 | 82 | |
|
83 | 83 | defaultdict = collections.defaultdict |
|
84 | 84 | |
|
85 | 85 | |
|
86 | 86 | class nullui(object): |
|
87 | 87 | """blank ui object doing nothing""" |
|
88 | 88 | |
|
89 | 89 | debugflag = False |
|
90 | 90 | verbose = False |
|
91 | 91 | quiet = True |
|
92 | 92 | |
|
93 | 93 | def __getitem__(self, name): |
|
94 | 94 | def nullfunc(*args, **kwds): |
|
95 | 95 | return |
|
96 | 96 | |
|
97 | 97 | return nullfunc |
|
98 | 98 | |
|
99 | 99 | |
|
100 | 100 | class emptyfilecontext(object): |
|
101 | 101 | """minimal filecontext representing an empty file""" |
|
102 | 102 | |
|
103 | 103 | def __init__(self, repo): |
|
104 | 104 | self._repo = repo |
|
105 | 105 | |
|
106 | 106 | def data(self): |
|
107 | 107 | return b'' |
|
108 | 108 | |
|
109 | 109 | def node(self): |
|
110 | 110 | return self._repo.nullid |
|
111 | 111 | |
|
112 | 112 | |
|
113 | 113 | def uniq(lst): |
|
114 | 114 | """list -> list. remove duplicated items without changing the order""" |
|
115 | 115 | seen = set() |
|
116 | 116 | result = [] |
|
117 | 117 | for x in lst: |
|
118 | 118 | if x not in seen: |
|
119 | 119 | seen.add(x) |
|
120 | 120 | result.append(x) |
|
121 | 121 | return result |
|
122 | 122 | |
|
123 | 123 | |
|
124 | 124 | def getdraftstack(headctx, limit=None): |
|
125 | 125 | """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets. |
|
126 | 126 | |
|
127 | 127 | changesets are sorted in topo order, oldest first. |
|
128 | 128 | return at most limit items, if limit is a positive number. |
|
129 | 129 | |
|
130 | 130 | merges are considered non-draft as well. i.e. every commit |
|
131 | 131 | returned has exactly one parent. |
|
132 | 132 | """ |
|
133 | 133 | ctx = headctx |
|
134 | 134 | result = [] |
|
135 | 135 | while ctx.phase() != phases.public: |
|
136 | 136 | if limit and len(result) >= limit: |
|
137 | 137 | break |
|
138 | 138 | parents = ctx.parents() |
|
139 | 139 | if len(parents) != 1: |
|
140 | 140 | break |
|
141 | 141 | result.append(ctx) |
|
142 | 142 | ctx = parents[0] |
|
143 | 143 | result.reverse() |
|
144 | 144 | return result |
|
145 | 145 | |
|
146 | 146 | |
|
147 | 147 | def getfilestack(stack, path, seenfctxs=None): |
|
148 | 148 | """([ctx], str, set) -> [fctx], {ctx: fctx} |
|
149 | 149 | |
|
150 | 150 | stack is a list of contexts, from old to new. usually they are what |
|
151 | 151 | "getdraftstack" returns. |
|
152 | 152 | |
|
153 | 153 | follows renames, but not copies. |
|
154 | 154 | |
|
155 | 155 | seenfctxs is a set of filecontexts that will be considered "immutable". |
|
156 | 156 | they are usually what this function returned in earlier calls, useful |
|
157 | 157 | to avoid issues where a file was "moved" to multiple places and was then |
|
158 | 158 | modified differently, like: "a" was copied to "b", "a" was also copied to |
|
159 | 159 | "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a" |
|
160 | 160 | and we enforce that only one of them can affect "a"'s content. |
|
161 | 161 | |
|
162 | 162 | return an empty list and an empty dict, if the specified path does not |
|
163 | 163 | exist in stack[-1] (the top of the stack). |
|
164 | 164 | |
|
165 | 165 | otherwise, return a list of de-duplicated filecontexts, and the map to |
|
166 | 166 | convert ctx in the stack to fctx, for possible mutable fctxs. the first item |
|
167 | 167 | of the list would be outside the stack and should be considered immutable. |
|
168 | 168 | the remaining items are within the stack. |
|
169 | 169 | |
|
170 | 170 | for example, given the following changelog and corresponding filelog |
|
171 | 171 | revisions: |
|
172 | 172 | |
|
173 | 173 | changelog: 3----4----5----6----7 |
|
174 | 174 | filelog: x 0----1----1----2 (x: no such file yet) |
|
175 | 175 | |
|
176 | 176 | - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2}) |
|
177 | 177 | - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a |
|
178 | 178 | dummy empty filecontext. |
|
179 | 179 | - if stack = [2], returns ([], {}) |
|
180 | 180 | - if stack = [7], returns ([1, 2], {7: 2}) |
|
181 | 181 | - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be |
|
182 | 182 | removed, since 1 is immutable. |
|
183 | 183 | """ |
|
184 | 184 | if seenfctxs is None: |
|
185 | 185 | seenfctxs = set() |
|
186 | 186 | assert stack |
|
187 | 187 | |
|
188 | 188 | if path not in stack[-1]: |
|
189 | 189 | return [], {} |
|
190 | 190 | |
|
191 | 191 | fctxs = [] |
|
192 | 192 | fctxmap = {} |
|
193 | 193 | |
|
194 | 194 | pctx = stack[0].p1() # the public (immutable) ctx we stop at |
|
195 | 195 | for ctx in reversed(stack): |
|
196 | 196 | if path not in ctx: # the file is added in the next commit |
|
197 | 197 | pctx = ctx |
|
198 | 198 | break |
|
199 | 199 | fctx = ctx[path] |
|
200 | 200 | fctxs.append(fctx) |
|
201 | 201 | if fctx in seenfctxs: # treat fctx as the immutable one |
|
202 | 202 | pctx = None # do not add another immutable fctx |
|
203 | 203 | break |
|
204 | 204 | fctxmap[ctx] = fctx # only for mutable fctxs |
|
205 | 205 | copy = fctx.copysource() |
|
206 | 206 | if copy: |
|
207 | 207 | path = copy # follow rename |
|
208 | 208 | if path in ctx: # but do not follow copy |
|
209 | 209 | pctx = ctx.p1() |
|
210 | 210 | break |
|
211 | 211 | |
|
212 | 212 | if pctx is not None: # need an extra immutable fctx |
|
213 | 213 | if path in pctx: |
|
214 | 214 | fctxs.append(pctx[path]) |
|
215 | 215 | else: |
|
216 | 216 | fctxs.append(emptyfilecontext(pctx.repo())) |
|
217 | 217 | |
|
218 | 218 | fctxs.reverse() |
|
219 | 219 | # note: we rely on a property of hg: filerev is not reused for linear |
|
220 | 220 | # history. i.e. it's impossible to have: |
|
221 | 221 | # changelog: 4----5----6 (linear, no merges) |
|
222 | 222 | # filelog: 1----2----1 |
|
223 | 223 | # ^ reuse filerev (impossible) |
|
224 | 224 | # because parents are part of the hash. if that's not true, we need to |
|
225 | 225 | # remove uniq and find a different way to identify fctxs. |
|
226 | 226 | return uniq(fctxs), fctxmap |
|
227 | 227 | |
|
228 | 228 | |
|
229 | 229 | class overlaystore(patch.filestore): |
|
230 | 230 | """read-only, hybrid store based on a dict and ctx. |
|
231 | 231 | memworkingcopy: {path: content}, overrides file contents. |
|
232 | 232 | """ |
|
233 | 233 | |
|
234 | 234 | def __init__(self, basectx, memworkingcopy): |
|
235 | 235 | self.basectx = basectx |
|
236 | 236 | self.memworkingcopy = memworkingcopy |
|
237 | 237 | |
|
238 | 238 | def getfile(self, path): |
|
239 | 239 | """comply with mercurial.patch.filestore.getfile""" |
|
240 | 240 | if path not in self.basectx: |
|
241 | 241 | return None, None, None |
|
242 | 242 | fctx = self.basectx[path] |
|
243 | 243 | if path in self.memworkingcopy: |
|
244 | 244 | content = self.memworkingcopy[path] |
|
245 | 245 | else: |
|
246 | 246 | content = fctx.data() |
|
247 | 247 | mode = (fctx.islink(), fctx.isexec()) |
|
248 | 248 | copy = fctx.copysource() |
|
249 | 249 | return content, mode, copy |
|
250 | 250 | |
|
251 | 251 | |
|
252 | 252 | def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None): |
|
253 | 253 | """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx |
|
254 | 254 | memworkingcopy overrides file contents. |
|
255 | 255 | """ |
|
256 | 256 | # parents must contain 2 items: (node1, node2) |
|
257 | 257 | if parents is None: |
|
258 | 258 | parents = ctx.repo().changelog.parents(ctx.node()) |
|
259 | 259 | if extra is None: |
|
260 | 260 | extra = ctx.extra() |
|
261 | 261 | if desc is None: |
|
262 | 262 | desc = ctx.description() |
|
263 | 263 | date = ctx.date() |
|
264 | 264 | user = ctx.user() |
|
265 | 265 | files = set(ctx.files()).union(memworkingcopy) |
|
266 | 266 | store = overlaystore(ctx, memworkingcopy) |
|
267 | 267 | return context.memctx( |
|
268 | 268 | repo=ctx.repo(), |
|
269 | 269 | parents=parents, |
|
270 | 270 | text=desc, |
|
271 | 271 | files=files, |
|
272 | 272 | filectxfn=store, |
|
273 | 273 | user=user, |
|
274 | 274 | date=date, |
|
275 | 275 | branch=None, |
|
276 | 276 | extra=extra, |
|
277 | 277 | ) |
|
278 | 278 | |
|
279 | 279 | |
|
280 | 280 | class filefixupstate(object): |
|
281 | 281 | """state needed to apply fixups to a single file |
|
282 | 282 | |
|
283 | 283 | internally, it keeps file contents of several revisions and a linelog. |
|
284 | 284 | |
|
285 | 285 | the linelog uses odd revision numbers for original contents (fctxs passed |
|
286 | 286 | to __init__), and even revision numbers for fixups, like: |
|
287 | 287 | |
|
288 | 288 | linelog rev 1: self.fctxs[0] (from an immutable "public" changeset) |
|
289 | 289 | linelog rev 2: fixups made to self.fctxs[0] |
|
290 | 290 | linelog rev 3: self.fctxs[1] (a child of fctxs[0]) |
|
291 | 291 | linelog rev 4: fixups made to self.fctxs[1] |
|
292 | 292 | ... |
|
293 | 293 | |
|
294 | 294 | a typical use is like: |
|
295 | 295 | |
|
296 | 296 | 1. call diffwith, to calculate self.fixups |
|
297 | 297 | 2. (optionally), present self.fixups to the user, or change it |
|
298 | 298 | 3. call apply, to apply changes |
|
299 | 299 | 4. read results from "finalcontents", or call getfinalcontent |
|
300 | 300 | """ |
|
301 | 301 | |
|
302 | 302 | def __init__(self, fctxs, path, ui=None, opts=None): |
|
303 | 303 | """([fctx], ui or None) -> None |
|
304 | 304 | |
|
305 | 305 | fctxs should be linear, and sorted by topo order - oldest first. |
|
306 | 306 | fctxs[0] will be considered as "immutable" and will not be changed. |
|
307 | 307 | """ |
|
308 | 308 | self.fctxs = fctxs |
|
309 | 309 | self.path = path |
|
310 | 310 | self.ui = ui or nullui() |
|
311 | 311 | self.opts = opts or {} |
|
312 | 312 | |
|
313 | 313 | # following fields are built from fctxs. they exist for perf reasons |
|
314 | 314 | self.contents = [f.data() for f in fctxs] |
|
315 | 315 | self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents) |
|
316 | 316 | self.linelog = self._buildlinelog() |
|
317 | 317 | if self.ui.debugflag: |
|
318 | 318 | assert self._checkoutlinelog() == self.contents |
|
319 | 319 | |
|
320 | 320 | # following fields will be filled later |
|
321 | 321 | self.chunkstats = [0, 0] # [adopted, total : int] |
|
322 | 322 | self.targetlines = [] # [str] |
|
323 | 323 | self.fixups = [] # [(linelog rev, a1, a2, b1, b2)] |
|
324 | 324 | self.finalcontents = [] # [str] |
|
325 | 325 | self.ctxaffected = set() |
|
326 | 326 | |
|
327 | 327 | def diffwith(self, targetfctx, fm=None): |
|
328 | 328 | """calculate fixups needed by examining the differences between |
|
329 | 329 | self.fctxs[-1] and targetfctx, chunk by chunk. |
|
330 | 330 | |
|
331 | 331 | targetfctx is the target state we move towards. we may or may not be |
|
332 | 332 | able to get there because not all modified chunks can be amended into |
|
333 | 333 | a non-public fctx unambiguously. |
|
334 | 334 | |
|
335 | 335 | call this only once, before apply(). |
|
336 | 336 | |
|
337 | 337 | update self.fixups, self.chunkstats, and self.targetlines. |
|
338 | 338 | """ |
|
339 | 339 | a = self.contents[-1] |
|
340 | 340 | alines = self.contentlines[-1] |
|
341 | 341 | b = targetfctx.data() |
|
342 | 342 | blines = mdiff.splitnewlines(b) |
|
343 | 343 | self.targetlines = blines |
|
344 | 344 | |
|
345 | 345 | self.linelog.annotate(self.linelog.maxrev) |
|
346 | 346 | annotated = self.linelog.annotateresult # [(linelog rev, linenum)] |
|
347 | 347 | assert len(annotated) == len(alines) |
|
348 | 348 | # add a dummy end line to make insertion at the end easier |
|
349 | 349 | if annotated: |
|
350 | 350 | dummyendline = (annotated[-1][0], annotated[-1][1] + 1) |
|
351 | 351 | annotated.append(dummyendline) |
|
352 | 352 | |
|
353 | 353 | # analyse diff blocks |
|
354 | 354 | for chunk in self._alldiffchunks(a, b, alines, blines): |
|
355 | 355 | newfixups = self._analysediffchunk(chunk, annotated) |
|
356 | 356 | self.chunkstats[0] += bool(newfixups) # 1 or 0 |
|
357 | 357 | self.chunkstats[1] += 1 |
|
358 | 358 | self.fixups += newfixups |
|
359 | 359 | if fm is not None: |
|
360 | 360 | self._showchanges(fm, alines, blines, chunk, newfixups) |
|
361 | 361 | |
|
362 | 362 | def apply(self): |
|
363 | 363 | """apply self.fixups. update self.linelog, self.finalcontents. |
|
364 | 364 | |
|
365 | 365 | call this only once, before getfinalcontent(), after diffwith(). |
|
366 | 366 | """ |
|
367 | 367 | # the following is unnecessary, as it's done by "diffwith": |
|
368 | 368 | # self.linelog.annotate(self.linelog.maxrev) |
|
369 | 369 | for rev, a1, a2, b1, b2 in reversed(self.fixups): |
|
370 | 370 | blines = self.targetlines[b1:b2] |
|
371 | 371 | if self.ui.debugflag: |
|
372 | 372 | idx = (max(rev - 1, 0)) // 2 |
|
373 | 373 | self.ui.write( |
|
374 | 374 | _(b'%s: chunk %d:%d -> %d lines\n') |
|
375 | 375 | % (short(self.fctxs[idx].node()), a1, a2, len(blines)) |
|
376 | 376 | ) |
|
377 | 377 | self.linelog.replacelines(rev, a1, a2, b1, b2) |
|
378 | 378 | if self.opts.get(b'edit_lines', False): |
|
379 | 379 | self.finalcontents = self._checkoutlinelogwithedits() |
|
380 | 380 | else: |
|
381 | 381 | self.finalcontents = self._checkoutlinelog() |
|
382 | 382 | |
|
383 | 383 | def getfinalcontent(self, fctx): |
|
384 | 384 | """(fctx) -> str. get modified file content for a given filecontext""" |
|
385 | 385 | idx = self.fctxs.index(fctx) |
|
386 | 386 | return self.finalcontents[idx] |
|
387 | 387 | |
|
388 | 388 | def _analysediffchunk(self, chunk, annotated): |
|
389 | 389 | """analyse a diff chunk and return new fixups found |
|
390 | 390 | |
|
391 | 391 | return [] if no lines from the chunk can be safely applied. |
|
392 | 392 | |
|
393 | 393 | the chunk (or lines) cannot be safely applied, if, for example: |
|
394 | 394 | - the modified (deleted) lines belong to a public changeset |
|
395 | 395 | (self.fctxs[0]) |
|
396 | 396 | - the chunk is a pure insertion and the adjacent lines (at most 2 |
|
397 | 397 | lines) belong to different non-public changesets, or do not belong |
|
398 | 398 | to any non-public changesets. |
|
399 | 399 | - the chunk is modifying lines from different changesets. |
|
400 | 400 | in this case, if the number of lines deleted equals the number |
|
401 | 401 | of lines added, assume it's a simple 1:1 map (could be wrong). |
|
402 | 402 | otherwise, give up. |
|
403 | 403 | - the chunk is modifying lines from a single non-public changeset, |
|
404 | 404 | but other revisions touch the area as well. i.e. the lines are |
|
405 | 405 | not continuous as seen from the linelog. |
|
406 | 406 | """ |
|
407 | 407 | a1, a2, b1, b2 = chunk |
|
408 | 408 | # find involved indexes from annotate result |
|
409 | 409 | involved = annotated[a1:a2] |
|
410 | 410 | if not involved and annotated: # a1 == a2 and a is not empty |
|
411 | 411 | # pure insertion, check nearby lines. ignore lines belonging |
|
412 | 412 | # to the public (first) changeset (i.e. annotated[i][0] == 1) |
|
413 | 413 | nearbylinenums = {a2, max(0, a1 - 1)} |
|
414 | 414 | involved = [ |
|
415 | 415 | annotated[i] for i in nearbylinenums if annotated[i][0] != 1 |
|
416 | 416 | ] |
|
417 | 417 | involvedrevs = list({r for r, l in involved}) |
|
418 | 418 | newfixups = [] |
|
419 | 419 | if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True): |
|
420 | 420 | # chunk belongs to a single revision |
|
421 | 421 | rev = involvedrevs[0] |
|
422 | 422 | if rev > 1: |
|
423 | 423 | fixuprev = rev + 1 |
|
424 | 424 | newfixups.append((fixuprev, a1, a2, b1, b2)) |
|
425 | 425 | elif a2 - a1 == b2 - b1 or b1 == b2: |
|
426 | 426 | # 1:1 line mapping, or chunk was deleted |
|
427 | 427 | for i in pycompat.xrange(a1, a2): |
|
428 | 428 | rev, linenum = annotated[i] |
|
429 | 429 | if rev > 1: |
|
430 | 430 | if b1 == b2: # deletion, simply remove that single line |
|
431 | 431 | nb1 = nb2 = 0 |
|
432 | 432 | else: # 1:1 line mapping, change the corresponding rev |
|
433 | 433 | nb1 = b1 + i - a1 |
|
434 | 434 | nb2 = nb1 + 1 |
|
435 | 435 | fixuprev = rev + 1 |
|
436 | 436 | newfixups.append((fixuprev, i, i + 1, nb1, nb2)) |
|
437 | 437 | return self._optimizefixups(newfixups) |
|
438 | 438 | |
|
439 | 439 | @staticmethod |
|
440 | 440 | def _alldiffchunks(a, b, alines, blines): |
|
441 | 441 | """like mdiff.allblocks, but only care about differences""" |
|
442 | 442 | blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines) |
|
443 | 443 | for chunk, btype in blocks: |
|
444 | 444 | if btype != b'!': |
|
445 | 445 | continue |
|
446 | 446 | yield chunk |
|
447 | 447 | |
|
448 | 448 | def _buildlinelog(self): |
|
449 | 449 | """calculate the initial linelog based on self.content{,line}s. |
|
450 | 450 | this is similar to running a partial "annotate". |
|
451 | 451 | """ |
|
452 | 452 | llog = linelog.linelog() |
|
453 | 453 | a, alines = b'', [] |
|
454 | 454 | for i in pycompat.xrange(len(self.contents)): |
|
455 | 455 | b, blines = self.contents[i], self.contentlines[i] |
|
456 | 456 | llrev = i * 2 + 1 |
|
457 | 457 | chunks = self._alldiffchunks(a, b, alines, blines) |
|
458 | 458 | for a1, a2, b1, b2 in reversed(list(chunks)): |
|
459 | 459 | llog.replacelines(llrev, a1, a2, b1, b2) |
|
460 | 460 | a, alines = b, blines |
|
461 | 461 | return llog |
|
462 | 462 | |
|
463 | 463 | def _checkoutlinelog(self): |
|
464 | 464 | """() -> [str]. check out file contents from linelog""" |
|
465 | 465 | contents = [] |
|
466 | 466 | for i in pycompat.xrange(len(self.contents)): |
|
467 | 467 | rev = (i + 1) * 2 |
|
468 | 468 | self.linelog.annotate(rev) |
|
469 | 469 | content = b''.join(map(self._getline, self.linelog.annotateresult)) |
|
470 | 470 | contents.append(content) |
|
471 | 471 | return contents |
|
472 | 472 | |
|
473 | 473 | def _checkoutlinelogwithedits(self): |
|
474 | 474 | """() -> [str]. prompt all lines for edit""" |
|
475 | 475 | alllines = self.linelog.getalllines() |
|
476 | 476 | # header |
|
477 | 477 | editortext = ( |
|
478 | 478 | _( |
|
479 | 479 | b'HG: editing %s\nHG: "y" means the line to the right ' |
|
480 | 480 | b'exists in the changeset to the top\nHG:\n' |
|
481 | 481 | ) |
|
482 | 482 | % self.fctxs[-1].path() |
|
483 | 483 | ) |
|
484 | 484 | # [(idx, fctx)]. hide the dummy emptyfilecontext |
|
485 | 485 | visiblefctxs = [ |
|
486 | 486 | (i, f) |
|
487 | 487 | for i, f in enumerate(self.fctxs) |
|
488 | 488 | if not isinstance(f, emptyfilecontext) |
|
489 | 489 | ] |
|
490 | 490 | for i, (j, f) in enumerate(visiblefctxs): |
|
491 | 491 | editortext += _(b'HG: %s/%s %s %s\n') % ( |
|
492 | 492 | b'|' * i, |
|
493 | 493 | b'-' * (len(visiblefctxs) - i + 1), |
|
494 | 494 | short(f.node()), |
|
495 | 495 | f.description().split(b'\n', 1)[0], |
|
496 | 496 | ) |
|
497 | 497 | editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs)) |
|
498 | 498 | # figure out the lifetime of a line, this is relatively inefficient, |
|
499 | 499 | # but probably fine |
|
500 | 500 | lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}} |
|
501 | 501 | for i, f in visiblefctxs: |
|
502 | 502 | self.linelog.annotate((i + 1) * 2) |
|
503 | 503 | for l in self.linelog.annotateresult: |
|
504 | 504 | lineset[l].add(i) |
|
505 | 505 | # append lines |
|
506 | 506 | for l in alllines: |
|
507 | 507 | editortext += b' %s : %s' % ( |
|
508 | 508 | b''.join( |
|
509 | 509 | [ |
|
510 | 510 | (b'y' if i in lineset[l] else b' ') |
|
511 | 511 | for i, _f in visiblefctxs |
|
512 | 512 | ] |
|
513 | 513 | ), |
|
514 | 514 | self._getline(l), |
|
515 | 515 | ) |
|
516 | 516 | # run editor |
|
517 | 517 | editedtext = self.ui.edit(editortext, b'', action=b'absorb') |
|
518 | 518 | if not editedtext: |
|
519 | 519 | raise error.InputError(_(b'empty editor text')) |
|
520 | 520 | # parse edited result |
|
521 | 521 | contents = [b''] * len(self.fctxs) |
|
522 | 522 | leftpadpos = 4 |
|
523 | 523 | colonpos = leftpadpos + len(visiblefctxs) + 1 |
|
524 | 524 | for l in mdiff.splitnewlines(editedtext): |
|
525 | 525 | if l.startswith(b'HG:'): |
|
526 | 526 | continue |
|
527 | 527 | if l[colonpos - 1 : colonpos + 2] != b' : ': |
|
528 | 528 | raise error.InputError(_(b'malformed line: %s') % l) |
|
529 | 529 | linecontent = l[colonpos + 2 :] |
|
530 | 530 | for i, ch in enumerate( |
|
531 | 531 | pycompat.bytestr(l[leftpadpos : colonpos - 1]) |
|
532 | 532 | ): |
|
533 | 533 | if ch == b'y': |
|
534 | 534 | contents[visiblefctxs[i][0]] += linecontent |
|
535 | 535 | # chunkstats is hard to calculate if anything changes, therefore |
|
536 | 536 | # set them to just a simple value (1, 1). |
|
537 | 537 | if editedtext != editortext: |
|
538 | 538 | self.chunkstats = [1, 1] |
|
539 | 539 | return contents |
|
540 | 540 | |
|
541 | 541 | def _getline(self, lineinfo): |
|
542 | 542 | """((rev, linenum)) -> str. convert rev+line number to line content""" |
|
543 | 543 | rev, linenum = lineinfo |
|
544 | 544 | if rev & 1: # odd: original line taken from fctxs |
|
545 | 545 | return self.contentlines[rev // 2][linenum] |
|
546 | 546 | else: # even: fixup line from targetfctx |
|
547 | 547 | return self.targetlines[linenum] |
|
548 | 548 | |
|
549 | 549 | def _iscontinuous(self, a1, a2, closedinterval=False): |
|
550 | 550 | """(a1, a2 : int) -> bool |
|
551 | 551 | |
|
552 | 552 | check if these lines are continuous. i.e. no other insertions or |
|
553 | 553 | deletions (from other revisions) among these lines. |
|
554 | 554 | |
|
555 | 555 | closedinterval decides whether a2 should be included or not. i.e. is |
|
556 | 556 | it [a1, a2), or [a1, a2] ? |
|
557 | 557 | """ |
|
558 | 558 | if a1 >= a2: |
|
559 | 559 | return True |
|
560 | 560 | llog = self.linelog |
|
561 | 561 | offset1 = llog.getoffset(a1) |
|
562 | 562 | offset2 = llog.getoffset(a2) + int(closedinterval) |
|
563 | 563 | linesinbetween = llog.getalllines(offset1, offset2) |
|
564 | 564 | return len(linesinbetween) == a2 - a1 + int(closedinterval) |
|
565 | 565 | |
|
566 | 566 | def _optimizefixups(self, fixups): |
|
567 | 567 | """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)]. |
|
568 | 568 | merge adjacent fixups to make them less fragmented. |
|
569 | 569 | """ |
|
570 | 570 | result = [] |
|
571 | 571 | pcurrentchunk = [[-1, -1, -1, -1, -1]] |
|
572 | 572 | |
|
573 | 573 | def pushchunk(): |
|
574 | 574 | if pcurrentchunk[0][0] != -1: |
|
575 | 575 | result.append(tuple(pcurrentchunk[0])) |
|
576 | 576 | |
|
577 | 577 | for i, chunk in enumerate(fixups): |
|
578 | 578 | rev, a1, a2, b1, b2 = chunk |
|
579 | 579 | lastrev = pcurrentchunk[0][0] |
|
580 | 580 | lasta2 = pcurrentchunk[0][2] |
|
581 | 581 | lastb2 = pcurrentchunk[0][4] |
|
582 | 582 | if ( |
|
583 | 583 | a1 == lasta2 |
|
584 | 584 | and b1 == lastb2 |
|
585 | 585 | and rev == lastrev |
|
586 | 586 | and self._iscontinuous(max(a1 - 1, 0), a1) |
|
587 | 587 | ): |
|
588 | 588 | # merge into currentchunk |
|
589 | 589 | pcurrentchunk[0][2] = a2 |
|
590 | 590 | pcurrentchunk[0][4] = b2 |
|
591 | 591 | else: |
|
592 | 592 | pushchunk() |
|
593 | 593 | pcurrentchunk[0] = list(chunk) |
|
594 | 594 | pushchunk() |
|
595 | 595 | return result |
|
596 | 596 | |
|
597 | 597 | def _showchanges(self, fm, alines, blines, chunk, fixups): |
|
598 | 598 | def trim(line): |
|
599 | 599 | if line.endswith(b'\n'): |
|
600 | 600 | line = line[:-1] |
|
601 | 601 | return line |
|
602 | 602 | |
|
603 | 603 | # this is not optimized for perf but _showchanges only gets executed |
|
604 | 604 | # with an extra command-line flag. |
|
605 | 605 | a1, a2, b1, b2 = chunk |
|
606 | 606 | aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1) |
|
607 | 607 | for idx, fa1, fa2, fb1, fb2 in fixups: |
|
608 | 608 | for i in pycompat.xrange(fa1, fa2): |
|
609 | 609 | aidxs[i - a1] = (max(idx, 1) - 1) // 2 |
|
610 | 610 | for i in pycompat.xrange(fb1, fb2): |
|
611 | 611 | bidxs[i - b1] = (max(idx, 1) - 1) // 2 |
|
612 | 612 | |
|
613 | 613 | fm.startitem() |
|
614 | 614 | fm.write( |
|
615 | 615 | b'hunk', |
|
616 | 616 | b' %s\n', |
|
617 | 617 | b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1), |
|
618 | 618 | label=b'diff.hunk', |
|
619 | 619 | ) |
|
620 | 620 | fm.data(path=self.path, linetype=b'hunk') |
|
621 | 621 | |
|
622 | 622 | def writeline(idx, diffchar, line, linetype, linelabel): |
|
623 | 623 | fm.startitem() |
|
624 | 624 | node = b'' |
|
625 | 625 | if idx: |
|
626 | 626 | ctx = self.fctxs[idx] |
|
627 | 627 | fm.context(fctx=ctx) |
|
628 | 628 | node = ctx.hex() |
|
629 | 629 | self.ctxaffected.add(ctx.changectx()) |
|
630 | 630 | fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node') |
|
631 | 631 | fm.write( |
|
632 | 632 | b'diffchar ' + linetype, |
|
633 | 633 | b'%s%s\n', |
|
634 | 634 | diffchar, |
|
635 | 635 | line, |
|
636 | 636 | label=linelabel, |
|
637 | 637 | ) |
|
638 | 638 | fm.data(path=self.path, linetype=linetype) |
|
639 | 639 | |
|
640 | 640 | for i in pycompat.xrange(a1, a2): |
|
641 | 641 | writeline( |
|
642 | 642 | aidxs[i - a1], |
|
643 | 643 | b'-', |
|
644 | 644 | trim(alines[i]), |
|
645 | 645 | b'deleted', |
|
646 | 646 | b'diff.deleted', |
|
647 | 647 | ) |
|
648 | 648 | for i in pycompat.xrange(b1, b2): |
|
649 | 649 | writeline( |
|
650 | 650 | bidxs[i - b1], |
|
651 | 651 | b'+', |
|
652 | 652 | trim(blines[i]), |
|
653 | 653 | b'inserted', |
|
654 | 654 | b'diff.inserted', |
|
655 | 655 | ) |
|
656 | 656 | |
|
657 | 657 | |
|
658 | 658 | class fixupstate(object): |
|
659 | 659 | """state needed to run absorb |
|
660 | 660 | |
|
661 | 661 | internally, it keeps paths and filefixupstates. |
|
662 | 662 | |
|
663 | 663 | a typical use is similar to filefixupstate's: |
|
664 | 664 | |
|
665 | 665 | 1. call diffwith, to calculate fixups |
|
666 | 666 | 2. (optionally), present fixups to the user, or edit fixups |
|
667 | 667 | 3. call apply, to apply changes to memory |
|
668 | 668 | 4. call commit, to commit changes to hg database |
|
669 | 669 | """ |
|
670 | 670 | |
|
671 | 671 | def __init__(self, stack, ui=None, opts=None): |
|
672 | 672 | """([ctx], ui or None) -> None |
|
673 | 673 | |
|
674 | 674 | stack: should be linear, and sorted by topo order - oldest first. |
|
675 | 675 | all commits in stack are considered mutable. |
|
676 | 676 | """ |
|
677 | 677 | assert stack |
|
678 | 678 | self.ui = ui or nullui() |
|
679 | 679 | self.opts = opts or {} |
|
680 | 680 | self.stack = stack |
|
681 | 681 | self.repo = stack[-1].repo().unfiltered() |
|
682 | 682 | |
|
683 | 683 | # following fields will be filled later |
|
684 | 684 | self.paths = [] # [str] |
|
685 | 685 | self.status = None # ctx.status output |
|
686 | 686 | self.fctxmap = {} # {path: {ctx: fctx}} |
|
687 | 687 | self.fixupmap = {} # {path: filefixupstate} |
|
688 | 688 | self.replacemap = {} # {oldnode: newnode or None} |
|
689 | 689 | self.finalnode = None # head after all fixups |
|
690 | 690 | self.ctxaffected = set() # ctx that will be absorbed into |
|
691 | 691 | |
|
692 | 692 | def diffwith(self, targetctx, match=None, fm=None): |
|
693 | 693 | """diff and prepare fixups. update self.fixupmap, self.paths""" |
|
694 | 694 | # only care about modified files |
|
695 | 695 | self.status = self.stack[-1].status(targetctx, match) |
|
696 | 696 | self.paths = [] |
|
697 | 697 | # but if --edit-lines is used, the user may want to edit files |
|
698 | 698 | # even if they are not modified |
|
699 | 699 | editopt = self.opts.get(b'edit_lines') |
|
700 | 700 | if not self.status.modified and editopt and match: |
|
701 | 701 | interestingpaths = match.files() |
|
702 | 702 | else: |
|
703 | 703 | interestingpaths = self.status.modified |
|
704 | 704 | # prepare the filefixupstate |
|
705 | 705 | seenfctxs = set() |
|
706 | 706 | # sorting is necessary to eliminate ambiguity for the "double move" |
|
707 | 707 | # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A". |
|
708 | 708 | for path in sorted(interestingpaths): |
|
709 | 709 | self.ui.debug(b'calculating fixups for %s\n' % path) |
|
710 | 710 | targetfctx = targetctx[path] |
|
711 | 711 | fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs) |
|
712 | 712 | # ignore symbolic links, binary files, or unchanged files |
|
713 | 713 | if any( |
|
714 | 714 | f.islink() or stringutil.binary(f.data()) |
|
715 | 715 | for f in [targetfctx] + fctxs |
|
716 | 716 | if not isinstance(f, emptyfilecontext) |
|
717 | 717 | ): |
|
718 | 718 | continue |
|
719 | 719 | if targetfctx.data() == fctxs[-1].data() and not editopt: |
|
720 | 720 | continue |
|
721 | 721 | seenfctxs.update(fctxs[1:]) |
|
722 | 722 | self.fctxmap[path] = ctx2fctx |
|
723 | 723 | fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts) |
|
724 | 724 | if fm is not None: |
|
725 | 725 | fm.startitem() |
|
726 | 726 | fm.plain(b'showing changes for ') |
|
727 | 727 | fm.write(b'path', b'%s\n', path, label=b'absorb.path') |
|
728 | 728 | fm.data(linetype=b'path') |
|
729 | 729 | fstate.diffwith(targetfctx, fm) |
|
730 | 730 | self.fixupmap[path] = fstate |
|
731 | 731 | self.paths.append(path) |
|
732 | 732 | self.ctxaffected.update(fstate.ctxaffected) |
|
733 | 733 | |
|
734 | 734 | def apply(self): |
|
735 | 735 | """apply fixups to individual filefixupstates""" |
|
736 | for path, state in pycompat.iteritems(self.fixupmap): |
|
736 | for path, state in self.fixupmap.items(): | |
|
737 | 737 | if self.ui.debugflag: |
|
738 | 738 | self.ui.write(_(b'applying fixups to %s\n') % path) |
|
739 | 739 | state.apply() |
|
740 | 740 | |
|
741 | 741 | @property |
|
742 | 742 | def chunkstats(self): |
|
743 | 743 | """-> {path: chunkstats}. collect chunkstats from filefixupstates""" |
|
744 | return { | |
|
745 | path: state.chunkstats | |
|
746 | for path, state in pycompat.iteritems(self.fixupmap) | |
|
747 | } | |
|
744 | return {path: state.chunkstats for path, state in self.fixupmap.items()} | |
|
748 | 745 | |
|
749 | 746 | def commit(self): |
|
750 | 747 | """commit changes. update self.finalnode, self.replacemap""" |
|
751 | 748 | with self.repo.transaction(b'absorb') as tr: |
|
752 | 749 | self._commitstack() |
|
753 | 750 | self._movebookmarks(tr) |
|
754 | 751 | if self.repo[b'.'].node() in self.replacemap: |
|
755 | 752 | self._moveworkingdirectoryparent() |
|
756 | 753 | self._cleanupoldcommits() |
|
757 | 754 | return self.finalnode |
|
758 | 755 | |
|
759 | 756 | def printchunkstats(self): |
|
760 | 757 | """print things like '1 of 2 chunk(s) applied'""" |
|
761 | 758 | ui = self.ui |
|
762 | 759 | chunkstats = self.chunkstats |
|
763 | 760 | if ui.verbose: |
|
764 | 761 | # chunkstats for each file |
|
765 | for path, stat in pycompat.iteritems(chunkstats): |
|
762 | for path, stat in chunkstats.items(): | |
|
766 | 763 | if stat[0]: |
|
767 | 764 | ui.write( |
|
768 | 765 | _(b'%s: %d of %d chunk(s) applied\n') |
|
769 | 766 | % (path, stat[0], stat[1]) |
|
770 | 767 | ) |
|
771 | 768 | elif not ui.quiet: |
|
772 | 769 | # a summary for all files |
|
773 | 770 | stats = chunkstats.values() |
|
774 | 771 | applied, total = (sum(s[i] for s in stats) for i in (0, 1)) |
|
775 | 772 | ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total)) |
|
776 | 773 | |
|
777 | 774 | def _commitstack(self): |
|
778 | 775 | """make new commits. update self.finalnode, self.replacemap. |
|
779 | 776 | it is split from "commit" to avoid too much indentation. |
|
780 | 777 | """ |
|
781 | 778 | # last node (20-char) committed by us |
|
782 | 779 | lastcommitted = None |
|
783 | 780 | # p1 which overrides the parent of the next commit, "None" means use |
|
784 | 781 | # the original parent unchanged |
|
785 | 782 | nextp1 = None |
|
786 | 783 | for ctx in self.stack: |
|
787 | 784 | memworkingcopy = self._getnewfilecontents(ctx) |
|
788 | 785 | if not memworkingcopy and not lastcommitted: |
|
789 | 786 | # nothing changed, nothing committed |
|
790 | 787 | nextp1 = ctx |
|
791 | 788 | continue |
|
792 | 789 | willbecomenoop = ctx.files() and self._willbecomenoop( |
|
793 | 790 | memworkingcopy, ctx, nextp1 |
|
794 | 791 | ) |
|
795 | 792 | if self.skip_empty_successor and willbecomenoop: |
|
796 | 793 | # changeset is no longer necessary |
|
797 | 794 | self.replacemap[ctx.node()] = None |
|
798 | 795 | msg = _(b'became empty and was dropped') |
|
799 | 796 | else: |
|
800 | 797 | # changeset needs re-commit |
|
801 | 798 | nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1) |
|
802 | 799 | lastcommitted = self.repo[nodestr] |
|
803 | 800 | nextp1 = lastcommitted |
|
804 | 801 | self.replacemap[ctx.node()] = lastcommitted.node() |
|
805 | 802 | if memworkingcopy: |
|
806 | 803 | if willbecomenoop: |
|
807 | 804 | msg = _(b'%d file(s) changed, became empty as %s') |
|
808 | 805 | else: |
|
809 | 806 | msg = _(b'%d file(s) changed, became %s') |
|
810 | 807 | msg = msg % ( |
|
811 | 808 | len(memworkingcopy), |
|
812 | 809 | self._ctx2str(lastcommitted), |
|
813 | 810 | ) |
|
814 | 811 | else: |
|
815 | 812 | msg = _(b'became %s') % self._ctx2str(lastcommitted) |
|
816 | 813 | if self.ui.verbose and msg: |
|
817 | 814 | self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg)) |
|
818 | 815 | self.finalnode = lastcommitted and lastcommitted.node() |
|
819 | 816 | |
|
820 | 817 | def _ctx2str(self, ctx): |
|
821 | 818 | if self.ui.debugflag: |
|
822 | 819 | return b'%d:%s' % (ctx.rev(), ctx.hex()) |
|
823 | 820 | else: |
|
824 | 821 | return b'%d:%s' % (ctx.rev(), short(ctx.node())) |
|
825 | 822 | |
|
826 | 823 | def _getnewfilecontents(self, ctx): |
|
827 | 824 | """(ctx) -> {path: str} |
|
828 | 825 | |
|
829 | 826 | fetch file contents from filefixupstates. |
|
830 | 827 | return the working copy overrides - files different from ctx. |
|
831 | 828 | """ |
|
832 | 829 | result = {} |
|
833 | 830 | for path in self.paths: |
|
834 | 831 | ctx2fctx = self.fctxmap[path] # {ctx: fctx} |
|
835 | 832 | if ctx not in ctx2fctx: |
|
836 | 833 | continue |
|
837 | 834 | fctx = ctx2fctx[ctx] |
|
838 | 835 | content = fctx.data() |
|
839 | 836 | newcontent = self.fixupmap[path].getfinalcontent(fctx) |
|
840 | 837 | if content != newcontent: |
|
841 | 838 | result[fctx.path()] = newcontent |
|
842 | 839 | return result |
|
843 | 840 | |
|
844 | 841 | def _movebookmarks(self, tr): |
|
845 | 842 | repo = self.repo |
|
846 | 843 | needupdate = [ |
|
847 | 844 | (name, self.replacemap[hsh]) |
|
848 | for name, hsh in pycompat.iteritems(repo._bookmarks) |
|
845 | for name, hsh in repo._bookmarks.items() | |
|
849 | 846 | if hsh in self.replacemap |
|
850 | 847 | ] |
|
851 | 848 | changes = [] |
|
852 | 849 | for name, hsh in needupdate: |
|
853 | 850 | if hsh: |
|
854 | 851 | changes.append((name, hsh)) |
|
855 | 852 | if self.ui.verbose: |
|
856 | 853 | self.ui.write( |
|
857 | 854 | _(b'moving bookmark %s to %s\n') % (name, hex(hsh)) |
|
858 | 855 | ) |
|
859 | 856 | else: |
|
860 | 857 | changes.append((name, None)) |
|
861 | 858 | if self.ui.verbose: |
|
862 | 859 | self.ui.write(_(b'deleting bookmark %s\n') % name) |
|
863 | 860 | repo._bookmarks.applychanges(repo, tr, changes) |
|
864 | 861 | |
|
865 | 862 | def _moveworkingdirectoryparent(self): |
|
866 | 863 | if not self.finalnode: |
|
867 | 864 | # Find the latest not-{obsoleted,stripped} parent. |
|
868 | 865 | revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys()) |
|
869 | 866 | ctx = self.repo[revs.first()] |
|
870 | 867 | self.finalnode = ctx.node() |
|
871 | 868 | else: |
|
872 | 869 | ctx = self.repo[self.finalnode] |
|
873 | 870 | |
|
874 | 871 | dirstate = self.repo.dirstate |
|
875 | 872 | # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to |
|
876 | 873 | # be slow. in absorb's case, no need to invalidate fsmonitorstate. |
|
877 | 874 | noop = lambda: 0 |
|
878 | 875 | restore = noop |
|
879 | 876 | if util.safehasattr(dirstate, '_fsmonitorstate'): |
|
880 | 877 | bak = dirstate._fsmonitorstate.invalidate |
|
881 | 878 | |
|
882 | 879 | def restore(): |
|
883 | 880 | dirstate._fsmonitorstate.invalidate = bak |
|
884 | 881 | |
|
885 | 882 | dirstate._fsmonitorstate.invalidate = noop |
|
886 | 883 | try: |
|
887 | 884 | with dirstate.parentchange(): |
|
888 | 885 | dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths) |
|
889 | 886 | finally: |
|
890 | 887 | restore() |
|
891 | 888 | |
|
892 | 889 | @staticmethod |
|
893 | 890 | def _willbecomenoop(memworkingcopy, ctx, pctx=None): |
|
894 | 891 | """({path: content}, ctx, ctx) -> bool. test if a commit will be noop |
|
895 | 892 | |
|
896 | 893 | if it will become an empty commit (does not change anything, after the |
|
897 | 894 | memworkingcopy overrides), return True. otherwise return False. |
|
898 | 895 | """ |
|
899 | 896 | if not pctx: |
|
900 | 897 | parents = ctx.parents() |
|
901 | 898 | if len(parents) != 1: |
|
902 | 899 | return False |
|
903 | 900 | pctx = parents[0] |
|
904 | 901 | if ctx.branch() != pctx.branch(): |
|
905 | 902 | return False |
|
906 | 903 | if ctx.extra().get(b'close'): |
|
907 | 904 | return False |
|
908 | 905 | # ctx changes more files (not a subset of memworkingcopy) |
|
909 | 906 | if not set(ctx.files()).issubset(set(memworkingcopy)): |
|
910 | 907 | return False |
|
911 | for path, content in pycompat.iteritems(memworkingcopy): |
|
908 | for path, content in memworkingcopy.items(): | |
|
912 | 909 | if path not in pctx or path not in ctx: |
|
913 | 910 | return False |
|
914 | 911 | fctx = ctx[path] |
|
915 | 912 | pfctx = pctx[path] |
|
916 | 913 | if pfctx.flags() != fctx.flags(): |
|
917 | 914 | return False |
|
918 | 915 | if pfctx.data() != content: |
|
919 | 916 | return False |
|
920 | 917 | return True |
|
921 | 918 | |
|
922 | 919 | def _commitsingle(self, memworkingcopy, ctx, p1=None): |
|
923 | 920 | """({path: content}, ctx, node) -> node. make a single commit |
|
924 | 921 | |
|
925 | 922 | the commit is a clone from ctx, with an (optionally) different p1, and |
|
926 | 923 | different file contents replaced by memworkingcopy. |
|
927 | 924 | """ |
|
928 | 925 | parents = p1 and (p1, self.repo.nullid) |
|
929 | 926 | extra = ctx.extra() |
|
930 | 927 | if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'): |
|
931 | 928 | extra[b'absorb_source'] = ctx.hex() |
|
932 | 929 | |
|
933 | 930 | desc = rewriteutil.update_hash_refs( |
|
934 | 931 | ctx.repo(), |
|
935 | 932 | ctx.description(), |
|
936 | 933 | { |
|
937 | 934 | oldnode: [newnode] |
|
938 | 935 | for oldnode, newnode in self.replacemap.items() |
|
939 | 936 | }, |
|
940 | 937 | ) |
|
941 | 938 | mctx = overlaycontext( |
|
942 | 939 | memworkingcopy, ctx, parents, extra=extra, desc=desc |
|
943 | 940 | ) |
|
944 | 941 | return mctx.commit() |
|
945 | 942 | |
|
946 | 943 | @util.propertycache |
|
947 | 944 | def _useobsolete(self): |
|
948 | 945 | """() -> bool""" |
|
949 | 946 | return obsolete.isenabled(self.repo, obsolete.createmarkersopt) |
|
950 | 947 | |
|
951 | 948 | def _cleanupoldcommits(self): |
|
952 | 949 | replacements = { |
|
953 | 950 | k: ([v] if v is not None else []) |
|
954 | for k, v in pycompat.iteritems(self.replacemap) |
|
951 | for k, v in self.replacemap.items() | |
|
955 | 952 | } |
|
956 | 953 | if replacements: |
|
957 | 954 | scmutil.cleanupnodes( |
|
958 | 955 | self.repo, replacements, operation=b'absorb', fixphase=True |
|
959 | 956 | ) |
|
960 | 957 | |
|
961 | 958 | @util.propertycache |
|
962 | 959 | def skip_empty_successor(self): |
|
963 | 960 | return rewriteutil.skip_empty_successor(self.ui, b'absorb') |
|
964 | 961 | |
|
965 | 962 | |
|
966 | 963 | def _parsechunk(hunk): |
|
967 | 964 | """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))""" |
|
968 | 965 | if type(hunk) not in (crecord.uihunk, patch.recordhunk): |
|
969 | 966 | return None, None |
|
970 | 967 | path = hunk.header.filename() |
|
971 | 968 | a1 = hunk.fromline + len(hunk.before) - 1 |
|
972 | 969 | # remove before and after context |
|
973 | 970 | hunk.before = hunk.after = [] |
|
974 | 971 | buf = util.stringio() |
|
975 | 972 | hunk.write(buf) |
|
976 | 973 | patchlines = mdiff.splitnewlines(buf.getvalue()) |
|
977 | 974 | # hunk.prettystr() will update hunk.removed |
|
978 | 975 | a2 = a1 + hunk.removed |
|
979 | 976 | blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')] |
|
980 | 977 | return path, (a1, a2, blines) |
|
981 | 978 | |
|
982 | 979 | |
|
983 | 980 | def overlaydiffcontext(ctx, chunks): |
|
984 | 981 | """(ctx, [crecord.uihunk]) -> memctx |
|
985 | 982 | |
|
986 | 983 | return a memctx with some [1] patches (chunks) applied to ctx. |
|
987 | 984 | [1]: modifications are handled. renames, mode changes, etc. are ignored. |
|
988 | 985 | """ |
|
989 | 986 | # sadly the applying-patch logic is hardly reusable, and messy: |
|
990 | 987 | # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it |
|
991 | 988 | # needs a file stream of a patch and will re-parse it, while we have |
|
992 | 989 | # structured hunk objects at hand. |
|
993 | 990 | # 2. a lot of different implementations about "chunk" (patch.hunk, |
|
994 | 991 | # patch.recordhunk, crecord.uihunk) |
|
995 | 992 | # as we only care about applying changes to modified files, no mode |
|
996 | 993 | # change, no binary diff, and no renames, it's probably okay to |
|
997 | 994 | # re-invent the logic using much simpler code here. |
|
998 | 995 | memworkingcopy = {} # {path: content} |
|
999 | 996 | patchmap = defaultdict(lambda: []) # {path: [(a1, a2, [bline])]} |
|
1000 | 997 | for path, info in map(_parsechunk, chunks): |
|
1001 | 998 | if not path or not info: |
|
1002 | 999 | continue |
|
1003 | 1000 | patchmap[path].append(info) |
|
1004 | for path, patches in pycompat.iteritems(patchmap): |
|
1001 | for path, patches in patchmap.items(): | |
|
1005 | 1002 | if path not in ctx or not patches: |
|
1006 | 1003 | continue |
|
1007 | 1004 | patches.sort(reverse=True) |
|
1008 | 1005 | lines = mdiff.splitnewlines(ctx[path].data()) |
|
1009 | 1006 | for a1, a2, blines in patches: |
|
1010 | 1007 | lines[a1:a2] = blines |
|
1011 | 1008 | memworkingcopy[path] = b''.join(lines) |
|
1012 | 1009 | return overlaycontext(memworkingcopy, ctx) |
|
1013 | 1010 | |
|
1014 | 1011 | |
|
1015 | 1012 | def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None): |
|
1016 | 1013 | """pick fixup chunks from targetctx, apply them to stack. |
|
1017 | 1014 | |
|
1018 | 1015 | if targetctx is None, the working copy context will be used. |
|
1019 | 1016 | if stack is None, the current draft stack will be used. |
|
1020 | 1017 | return fixupstate. |
|
1021 | 1018 | """ |
|
1022 | 1019 | if stack is None: |
|
1023 | 1020 | limit = ui.configint(b'absorb', b'max-stack-size') |
|
1024 | 1021 | headctx = repo[b'.'] |
|
1025 | 1022 | if len(headctx.parents()) > 1: |
|
1026 | 1023 | raise error.InputError(_(b'cannot absorb into a merge')) |
|
1027 | 1024 | stack = getdraftstack(headctx, limit) |
|
1028 | 1025 | if limit and len(stack) >= limit: |
|
1029 | 1026 | ui.warn( |
|
1030 | 1027 | _( |
|
1031 | 1028 | b'absorb: only the recent %d changesets will ' |
|
1032 | 1029 | b'be analysed\n' |
|
1033 | 1030 | ) |
|
1034 | 1031 | % limit |
|
1035 | 1032 | ) |
|
1036 | 1033 | if not stack: |
|
1037 | 1034 | raise error.InputError(_(b'no mutable changeset to change')) |
|
1038 | 1035 | if targetctx is None: # default to working copy |
|
1039 | 1036 | targetctx = repo[None] |
|
1040 | 1037 | if pats is None: |
|
1041 | 1038 | pats = () |
|
1042 | 1039 | if opts is None: |
|
1043 | 1040 | opts = {} |
|
1044 | 1041 | state = fixupstate(stack, ui=ui, opts=opts) |
|
1045 | 1042 | matcher = scmutil.match(targetctx, pats, opts) |
|
1046 | 1043 | if opts.get(b'interactive'): |
|
1047 | 1044 | diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher) |
|
1048 | 1045 | origchunks = patch.parsepatch(diff) |
|
1049 | 1046 | chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0] |
|
1050 | 1047 | targetctx = overlaydiffcontext(stack[-1], chunks) |
|
1051 | 1048 | fm = None |
|
1052 | 1049 | if opts.get(b'print_changes') or not opts.get(b'apply_changes'): |
|
1053 | 1050 | fm = ui.formatter(b'absorb', opts) |
|
1054 | 1051 | state.diffwith(targetctx, matcher, fm) |
|
1055 | 1052 | if fm is not None: |
|
1056 | 1053 | fm.startitem() |
|
1057 | 1054 | fm.write( |
|
1058 | 1055 | b"count", b"\n%d changesets affected\n", len(state.ctxaffected) |
|
1059 | 1056 | ) |
|
1060 | 1057 | fm.data(linetype=b'summary') |
|
1061 | 1058 | for ctx in reversed(stack): |
|
1062 | 1059 | if ctx not in state.ctxaffected: |
|
1063 | 1060 | continue |
|
1064 | 1061 | fm.startitem() |
|
1065 | 1062 | fm.context(ctx=ctx) |
|
1066 | 1063 | fm.data(linetype=b'changeset') |
|
1067 | 1064 | fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node') |
|
1068 | 1065 | descfirstline = ctx.description().splitlines()[0] |
|
1069 | 1066 | fm.write( |
|
1070 | 1067 | b'descfirstline', |
|
1071 | 1068 | b'%s\n', |
|
1072 | 1069 | descfirstline, |
|
1073 | 1070 | label=b'absorb.description', |
|
1074 | 1071 | ) |
|
1075 | 1072 | fm.end() |
|
1076 | 1073 | if not opts.get(b'dry_run'): |
|
1077 | 1074 | if ( |
|
1078 | 1075 | not opts.get(b'apply_changes') |
|
1079 | 1076 | and state.ctxaffected |
|
1080 | 1077 | and ui.promptchoice( |
|
1081 | 1078 | b"apply changes (y/N)? $$ &Yes $$ &No", default=1 |
|
1082 | 1079 | ) |
|
1083 | 1080 | ): |
|
1084 | 1081 | raise error.CanceledError(_(b'absorb cancelled\n')) |
|
1085 | 1082 | |
|
1086 | 1083 | state.apply() |
|
1087 | 1084 | if state.commit(): |
|
1088 | 1085 | state.printchunkstats() |
|
1089 | 1086 | elif not ui.quiet: |
|
1090 | 1087 | ui.write(_(b'nothing applied\n')) |
|
1091 | 1088 | return state |
|
1092 | 1089 | |
|
1093 | 1090 | |
|
1094 | 1091 | @command( |
|
1095 | 1092 | b'absorb', |
|
1096 | 1093 | [ |
|
1097 | 1094 | ( |
|
1098 | 1095 | b'a', |
|
1099 | 1096 | b'apply-changes', |
|
1100 | 1097 | None, |
|
1101 | 1098 | _(b'apply changes without prompting for confirmation'), |
|
1102 | 1099 | ), |
|
1103 | 1100 | ( |
|
1104 | 1101 | b'p', |
|
1105 | 1102 | b'print-changes', |
|
1106 | 1103 | None, |
|
1107 | 1104 | _(b'always print which changesets are modified by which changes'), |
|
1108 | 1105 | ), |
|
1109 | 1106 | ( |
|
1110 | 1107 | b'i', |
|
1111 | 1108 | b'interactive', |
|
1112 | 1109 | None, |
|
1113 | 1110 | _(b'interactively select which chunks to apply'), |
|
1114 | 1111 | ), |
|
1115 | 1112 | ( |
|
1116 | 1113 | b'e', |
|
1117 | 1114 | b'edit-lines', |
|
1118 | 1115 | None, |
|
1119 | 1116 | _( |
|
1120 | 1117 | b'edit what lines belong to which changesets before commit ' |
|
1121 | 1118 | b'(EXPERIMENTAL)' |
|
1122 | 1119 | ), |
|
1123 | 1120 | ), |
|
1124 | 1121 | ] |
|
1125 | 1122 | + commands.dryrunopts |
|
1126 | 1123 | + commands.templateopts |
|
1127 | 1124 | + commands.walkopts, |
|
1128 | 1125 | _(b'hg absorb [OPTION] [FILE]...'), |
|
1129 | 1126 | helpcategory=command.CATEGORY_COMMITTING, |
|
1130 | 1127 | helpbasic=True, |
|
1131 | 1128 | ) |
|
1132 | 1129 | def absorbcmd(ui, repo, *pats, **opts): |
|
1133 | 1130 | """incorporate corrections into the stack of draft changesets |
|
1134 | 1131 | |
|
1135 | 1132 | absorb analyzes each change in your working directory and attempts to |
|
1136 | 1133 | amend the changed lines into the changesets in your stack that first |
|
1137 | 1134 | introduced those lines. |
|
1138 | 1135 | |
|
1139 | 1136 | If absorb cannot find an unambiguous changeset to amend for a change, |
|
1140 | 1137 | that change will be left in the working directory, untouched. Such changes can be |
|
1141 | 1138 | observed by :hg:`status` or :hg:`diff` afterwards. In other words, |
|
1142 | 1139 | absorb does not write to the working directory. |
|
1143 | 1140 | |
|
1144 | 1141 | Changesets outside the revset `::. and not public() and not merge()` will |
|
1145 | 1142 | not be changed. |
|
1146 | 1143 | |
|
1147 | 1144 | Changesets that become empty after applying the changes will be deleted. |
|
1148 | 1145 | |
|
1149 | 1146 | By default, absorb will show what it plans to do and prompt for |
|
1150 | 1147 | confirmation. If you are confident that the changes will be absorbed |
|
1151 | 1148 | to the correct place, run :hg:`absorb -a` to apply the changes |
|
1152 | 1149 | immediately. |
|
1153 | 1150 | |
|
1154 | 1151 | Returns 0 on success, 1 if all chunks were ignored and nothing amended. |
|
1155 | 1152 | """ |
|
1156 | 1153 | opts = pycompat.byteskwargs(opts) |
|
1157 | 1154 | |
|
1158 | 1155 | with repo.wlock(), repo.lock(): |
|
1159 | 1156 | if not opts[b'dry_run']: |
|
1160 | 1157 | cmdutil.checkunfinished(repo) |
|
1161 | 1158 | |
|
1162 | 1159 | state = absorb(ui, repo, pats=pats, opts=opts) |
|
1163 | 1160 | if sum(s[0] for s in state.chunkstats.values()) == 0: |
|
1164 | 1161 | return 1 |
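
Aside: the linelog revision numbering used by `filefixupstate` above is the one non-obvious invariant in this file. A minimal, self-contained sketch of the index arithmetic (a toy loop only; the real `linelog` object is not involved, and the mapping is taken from the comments in the code above):

```python
# filefixupstate stores stack index i at these linelog revisions:
#   original content of fctxs[i] -> odd rev  i * 2 + 1   (_buildlinelog)
#   fixups applied to fctxs[i]   -> even rev i * 2 + 2   (_analysediffchunk)
# _checkoutlinelog then reads the fixed-up content back at rev (i + 1) * 2.
for i in range(4):  # pretend the stack holds 4 file revisions
    original_rev = i * 2 + 1          # matches "llrev = i * 2 + 1"
    fixup_rev = original_rev + 1      # matches "fixuprev = rev + 1"
    checkout_rev = (i + 1) * 2        # matches "rev = (i + 1) * 2"
    assert fixup_rev == checkout_rev  # fixups land where checkout reads
    print(i, original_rev, fixup_rev)
```
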
@@ -1,338 +1,337 b''
|
1 | 1 | # bzr.py - bzr support for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008, 2009 Marek Kubica <marek@xivilization.net> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | # This module is for handling Breezy imports or `brz`, but it's also compatible |
|
9 | 9 | # with Bazaar or `bzr`, which was formerly known as Bazaar-NG; |
|
10 | 10 | # it cannot access `baz` repositories, but they were never used very much. |
|
11 | 11 | |
|
12 | 12 | import os |
|
13 | 13 | |
|
14 | 14 | from mercurial.i18n import _ |
|
15 | 15 | from mercurial import ( |
|
16 | 16 | demandimport, |
|
17 | 17 | error, |
|
18 | pycompat, | |
|
19 | 18 | util, |
|
20 | 19 | ) |
|
21 | 20 | from . import common |
|
22 | 21 | |
|
23 | 22 | |
|
24 | 23 | # these do not work with demandimport, blacklist |
|
25 | 24 | demandimport.IGNORES.update( |
|
26 | 25 | [ |
|
27 | 26 | b'breezy.transactions', |
|
28 | 27 | b'breezy.urlutils', |
|
29 | 28 | b'ElementPath', |
|
30 | 29 | ] |
|
31 | 30 | ) |
|
32 | 31 | |
|
33 | 32 | try: |
|
34 | 33 | # bazaar imports |
|
35 | 34 | import breezy.bzr.bzrdir |
|
36 | 35 | import breezy.errors |
|
37 | 36 | import breezy.revision |
|
38 | 37 | import breezy.revisionspec |
|
39 | 38 | |
|
40 | 39 | bzrdir = breezy.bzr.bzrdir |
|
41 | 40 | errors = breezy.errors |
|
42 | 41 | revision = breezy.revision |
|
43 | 42 | revisionspec = breezy.revisionspec |
|
44 | 43 | revisionspec.RevisionSpec |
|
45 | 44 | except ImportError: |
|
46 | 45 | pass |
|
47 | 46 | |
|
48 | 47 | supportedkinds = ('file', 'symlink') |
|
49 | 48 | |
|
50 | 49 | |
|
51 | 50 | class bzr_source(common.converter_source): |
|
52 | 51 | """Reads Bazaar repositories by using the Bazaar Python libraries""" |
|
53 | 52 | |
|
54 | 53 | def __init__(self, ui, repotype, path, revs=None): |
|
55 | 54 | super(bzr_source, self).__init__(ui, repotype, path, revs=revs) |
|
56 | 55 | |
|
57 | 56 | if not os.path.exists(os.path.join(path, b'.bzr')): |
|
58 | 57 | raise common.NoRepo( |
|
59 | 58 | _(b'%s does not look like a Bazaar repository') % path |
|
60 | 59 | ) |
|
61 | 60 | |
|
62 | 61 | try: |
|
63 | 62 | # access breezy stuff |
|
64 | 63 | bzrdir |
|
65 | 64 | except NameError: |
|
66 | 65 | raise common.NoRepo(_(b'Bazaar modules could not be loaded')) |
|
67 | 66 | |
|
68 | 67 | path = util.abspath(path) |
|
69 | 68 | self._checkrepotype(path) |
|
70 | 69 | try: |
|
71 | 70 | bzr_dir = bzrdir.BzrDir.open(path.decode()) |
|
72 | 71 | self.sourcerepo = bzr_dir.open_repository() |
|
73 | 72 | except errors.NoRepositoryPresent: |
|
74 | 73 | raise common.NoRepo( |
|
75 | 74 | _(b'%s does not look like a Bazaar repository') % path |
|
76 | 75 | ) |
|
77 | 76 | self._parentids = {} |
|
78 | 77 | self._saverev = ui.configbool(b'convert', b'bzr.saverev') |
|
79 | 78 | |
|
80 | 79 | def _checkrepotype(self, path): |
|
81 | 80 | # Lightweight checkouts detection is informational but probably |
|
82 | 81 | # fragile at API level. It should not terminate the conversion. |
|
83 | 82 | try: |
|
84 | 83 | dir = bzrdir.BzrDir.open_containing(path.decode())[0] |
|
85 | 84 | try: |
|
86 | 85 | tree = dir.open_workingtree(recommend_upgrade=False) |
|
87 | 86 | branch = tree.branch |
|
88 | 87 | except (errors.NoWorkingTree, errors.NotLocalUrl): |
|
89 | 88 | tree = None |
|
90 | 89 | branch = dir.open_branch() |
|
91 | 90 | if ( |
|
92 | 91 | tree is not None |
|
93 | 92 | and tree.controldir.root_transport.base |
|
94 | 93 | != branch.controldir.root_transport.base |
|
95 | 94 | ): |
|
96 | 95 | self.ui.warn( |
|
97 | 96 | _( |
|
98 | 97 | b'warning: lightweight checkouts may cause ' |
|
99 | 98 | b'conversion failures, try with a regular ' |
|
100 | 99 | b'branch instead.\n' |
|
101 | 100 | ) |
|
102 | 101 | ) |
|
103 | 102 | except Exception: |
|
104 | 103 | self.ui.note(_(b'bzr source type could not be determined\n')) |
|
105 | 104 | |
|
106 | 105 | def before(self): |
|
107 | 106 | """Before the conversion begins, acquire a read lock |
|
108 | 107 | for all the operations that might need it. Fortunately |
|
109 | 108 | read locks don't block other reads or writes to the |
|
110 | 109 | repository, so this shouldn't have any impact on the usage of |
|
111 | 110 | the source repository. |
|
112 | 111 | |
|
113 | 112 | The alternative would be locking on every operation that |
|
114 | 113 | needs locks (there are currently two: getting the file and |
|
115 | 114 | getting the parent map) and releasing immediately after, |
|
116 | 115 | but this approach can take up to 40% longer.""" |
|
117 | 116 | self.sourcerepo.lock_read() |
|
118 | 117 | |
|
119 | 118 | def after(self): |
|
120 | 119 | self.sourcerepo.unlock() |
|
121 | 120 | |
|
122 | 121 | def _bzrbranches(self): |
|
123 | 122 | return self.sourcerepo.find_branches(using=True) |
|
124 | 123 | |
|
125 | 124 | def getheads(self): |
|
126 | 125 | if not self.revs: |
|
127 | 126 | # Set using=True to avoid nested repositories (see issue3254) |
|
128 | 127 | heads = sorted([b.last_revision() for b in self._bzrbranches()]) |
|
129 | 128 | else: |
|
130 | 129 | revid = None |
|
131 | 130 | for branch in self._bzrbranches(): |
|
132 | 131 | try: |
|
133 | 132 | revspec = self.revs[0].decode() |
|
134 | 133 | r = revisionspec.RevisionSpec.from_string(revspec) |
|
135 | 134 | info = r.in_history(branch) |
|
136 | 135 | except errors.BzrError: |
|
137 | 136 | continue 
|
138 | 137 | revid = info.rev_id |
|
139 | 138 | if revid is None: |
|
140 | 139 | raise error.Abort( |
|
141 | 140 | _(b'%s is not a valid revision') % self.revs[0] |
|
142 | 141 | ) |
|
143 | 142 | heads = [revid] |
|
144 | 143 | # Empty repositories return 'null:', which cannot be retrieved |
|
145 | 144 | heads = [h for h in heads if h != b'null:'] |
|
146 | 145 | return heads |
|
147 | 146 | |
|
148 | 147 | def getfile(self, name, rev): |
|
149 | 148 | name = name.decode() |
|
150 | 149 | revtree = self.sourcerepo.revision_tree(rev) |
|
151 | 150 | |
|
152 | 151 | try: |
|
153 | 152 | kind = revtree.kind(name) |
|
154 | 153 | except breezy.errors.NoSuchFile: |
|
155 | 154 | return None, None |
|
156 | 155 | if kind not in supportedkinds: |
|
157 | 156 | # the file is not available anymore - was deleted |
|
158 | 157 | return None, None |
|
159 | 158 | mode = self._modecache[(name.encode(), rev)] |
|
160 | 159 | if kind == 'symlink': |
|
161 | 160 | target = revtree.get_symlink_target(name) |
|
162 | 161 | if target is None: |
|
163 | 162 | raise error.Abort( |
|
164 | 163 | _(b'%s.%s symlink has no target') % (name, rev) |
|
165 | 164 | ) |
|
166 | 165 | return target.encode(), mode |
|
167 | 166 | else: |
|
168 | 167 | sio = revtree.get_file(name) |
|
169 | 168 | return sio.read(), mode |
|
170 | 169 | |
|
171 | 170 | def getchanges(self, version, full): |
|
172 | 171 | if full: |
|
173 | 172 | raise error.Abort(_(b"convert from bzr does not support --full")) 
|
174 | 173 | self._modecache = {} |
|
175 | 174 | self._revtree = self.sourcerepo.revision_tree(version) |
|
176 | 175 | # get the parentids from the cache |
|
177 | 176 | parentids = self._parentids.pop(version) |
|
178 | 177 | # only diff against first parent id |
|
179 | 178 | prevtree = self.sourcerepo.revision_tree(parentids[0]) |
|
180 | 179 | files, changes = self._gettreechanges(self._revtree, prevtree) |
|
181 | 180 | return files, changes, set() |
|
182 | 181 | |
|
183 | 182 | def getcommit(self, version): |
|
184 | 183 | rev = self.sourcerepo.get_revision(version) |
|
185 | 184 | # populate parent id cache |
|
186 | 185 | if not rev.parent_ids: |
|
187 | 186 | parents = [] |
|
188 | 187 | self._parentids[version] = (revision.NULL_REVISION,) |
|
189 | 188 | else: |
|
190 | 189 | parents = self._filterghosts(rev.parent_ids) |
|
191 | 190 | self._parentids[version] = parents |
|
192 | 191 | |
|
193 | 192 | branch = rev.properties.get('branch-nick', 'default') |
|
194 | 193 | if branch == 'trunk': |
|
195 | 194 | branch = 'default' |
|
196 | 195 | return common.commit( |
|
197 | 196 | parents=parents, |
|
198 | 197 | date=b'%d %d' % (rev.timestamp, -rev.timezone), |
|
199 | 198 | author=self.recode(rev.committer), |
|
200 | 199 | desc=self.recode(rev.message), |
|
201 | 200 | branch=branch.encode('utf8'), |
|
202 | 201 | rev=version, |
|
203 | 202 | saverev=self._saverev, |
|
204 | 203 | ) |
|
205 | 204 | |
|
206 | 205 | def gettags(self): |
|
207 | 206 | bytetags = {} |
|
208 | 207 | for branch in self._bzrbranches(): |
|
209 | 208 | if not branch.supports_tags(): |
|
210 | 209 | return {} |
|
211 | 210 | tagdict = branch.tags.get_tag_dict() |
|
212 | for name, rev in pycompat.iteritems(tagdict): | 

211 | for name, rev in tagdict.items(): | 
|
213 | 212 | bytetags[self.recode(name)] = rev |
|
214 | 213 | return bytetags |
|
215 | 214 | |
|
216 | 215 | def getchangedfiles(self, rev, i): |
|
217 | 216 | self._modecache = {} |
|
218 | 217 | curtree = self.sourcerepo.revision_tree(rev) |
|
219 | 218 | if i is not None: |
|
220 | 219 | parentid = self._parentids[rev][i] |
|
221 | 220 | else: |
|
222 | 221 | # no parent id, get the empty revision |
|
223 | 222 | parentid = revision.NULL_REVISION |
|
224 | 223 | |
|
225 | 224 | prevtree = self.sourcerepo.revision_tree(parentid) |
|
226 | 225 | changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]] |
|
227 | 226 | return changes |
|
228 | 227 | |
|
229 | 228 | def _gettreechanges(self, current, origin): |
|
230 | 229 | revid = current._revision_id |
|
231 | 230 | changes = [] |
|
232 | 231 | renames = {} |
|
233 | 232 | seen = set() |
|
234 | 233 | |
|
235 | 234 | # Fall back to the deprecated attribute for legacy installations. |
|
236 | 235 | try: |
|
237 | 236 | inventory = origin.root_inventory |
|
238 | 237 | except AttributeError: |
|
239 | 238 | inventory = origin.inventory |
|
240 | 239 | |
|
241 | 240 | # Process the entries by reverse lexicographic name order to |
|
242 | 241 | # handle nested renames correctly, most specific first. |
|
243 | 242 | |
|
244 | 243 | def key(c): |
|
245 | 244 | return c.path[0] or c.path[1] or "" |
|
246 | 245 | |
|
247 | 246 | curchanges = sorted( |
|
248 | 247 | current.iter_changes(origin), |
|
249 | 248 | key=key, |
|
250 | 249 | reverse=True, |
|
251 | 250 | ) |
|
252 | 251 | for change in curchanges: |
|
253 | 252 | paths = change.path |
|
254 | 253 | kind = change.kind |
|
255 | 254 | executable = change.executable |
|
256 | 255 | if paths[0] == u'' or paths[1] == u'': |
|
257 | 256 | # ignore changes to tree root |
|
258 | 257 | continue |
|
259 | 258 | |
|
260 | 259 | # bazaar tracks directories, mercurial does not, so |
|
261 | 260 | # we have to rename the directory contents |
|
262 | 261 | if kind[1] == 'directory': |
|
263 | 262 | if kind[0] not in (None, 'directory'): |
|
264 | 263 | # Replacing 'something' with a directory, record it |
|
265 | 264 | # so it can be removed. |
|
266 | 265 | changes.append((self.recode(paths[0]), revid)) |
|
267 | 266 | |
|
268 | 267 | if kind[0] == 'directory' and None not in paths: |
|
269 | 268 | renaming = paths[0] != paths[1] |
|
270 | 269 | # neither an add nor a delete - a move 
|
271 | 270 | # rename all directory contents manually |
|
272 | 271 | subdir = inventory.path2id(paths[0]) |
|
273 | 272 | # get all child-entries of the directory |
|
274 | 273 | for name, entry in inventory.iter_entries(subdir): |
|
275 | 274 | # hg does not track directory renames |
|
276 | 275 | if entry.kind == 'directory': |
|
277 | 276 | continue |
|
278 | 277 | frompath = self.recode(paths[0] + '/' + name) |
|
279 | 278 | if frompath in seen: |
|
280 | 279 | # Already handled by a more specific change entry |
|
281 | 280 | # This is important when you have: |
|
282 | 281 | # a => b |
|
283 | 282 | # a/c => a/c |
|
284 | 283 | # Here a/c must not be renamed into b/c |
|
285 | 284 | continue |
|
286 | 285 | seen.add(frompath) |
|
287 | 286 | if not renaming: |
|
288 | 287 | continue |
|
289 | 288 | topath = self.recode(paths[1] + '/' + name) |
|
290 | 289 | # register the files as changed |
|
291 | 290 | changes.append((frompath, revid)) |
|
292 | 291 | changes.append((topath, revid)) |
|
293 | 292 | # add to mode cache |
|
294 | 293 | mode = ( |
|
295 | 294 | (entry.executable and b'x') |
|
296 | 295 | or (entry.kind == 'symlink' and b'l') 
|
297 | 296 | or b'' |
|
298 | 297 | ) |
|
299 | 298 | self._modecache[(topath, revid)] = mode |
|
300 | 299 | # register the change as move |
|
301 | 300 | renames[topath] = frompath |
|
302 | 301 | |
|
303 | 302 | # no further changes, go to the next change |
|
304 | 303 | continue |
|
305 | 304 | |
|
306 | 305 | # we got unicode paths, need to convert them |
|
307 | 306 | path, topath = paths |
|
308 | 307 | if path is not None: |
|
309 | 308 | path = self.recode(path) |
|
310 | 309 | if topath is not None: |
|
311 | 310 | topath = self.recode(topath) |
|
312 | 311 | seen.add(path or topath) |
|
313 | 312 | |
|
314 | 313 | if topath is None: |
|
315 | 314 | # file deleted |
|
316 | 315 | changes.append((path, revid)) |
|
317 | 316 | continue |
|
318 | 317 | |
|
319 | 318 | # renamed |
|
320 | 319 | if path and path != topath: |
|
321 | 320 | renames[topath] = path |
|
322 | 321 | changes.append((path, revid)) |
|
323 | 322 | |
|
324 | 323 | # populate the mode cache |
|
325 | 324 | kind, executable = [e[1] for e in (kind, executable)] |
|
326 | 325 | mode = (executable and b'x') or (kind == 'symlink' and b'l') or b'' |
|
327 | 326 | self._modecache[(topath, revid)] = mode |
|
328 | 327 | changes.append((topath, revid)) |
|
329 | 328 | |
|
330 | 329 | return changes, renames |
|
331 | 330 | |
|
332 | 331 | def _filterghosts(self, ids): |
|
333 | 332 | """Filters out ghost revisions which hg does not support, see |
|
334 | 333 | <http://bazaar-vcs.org/GhostRevision> |
|
335 | 334 | """ |
|
336 | 335 | parentmap = self.sourcerepo.get_parent_map(ids) |
|
337 | 336 | parents = tuple([parent for parent in ids if parent in parentmap]) |
|
338 | 337 | return parents |
@@ -1,597 +1,597 b'' | |||
|
1 | 1 | # common.py - common code for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import base64 |
|
9 | 9 | import datetime |
|
10 | 10 | import errno |
|
11 | 11 | import os |
|
12 | 12 | import pickle |
|
13 | 13 | import re |
|
14 | 14 | import shlex |
|
15 | 15 | import subprocess |
|
16 | 16 | |
|
17 | 17 | from mercurial.i18n import _ |
|
18 | 18 | from mercurial.pycompat import open |
|
19 | 19 | from mercurial import ( |
|
20 | 20 | encoding, |
|
21 | 21 | error, |
|
22 | 22 | phases, |
|
23 | 23 | pycompat, |
|
24 | 24 | util, |
|
25 | 25 | ) |
|
26 | 26 | from mercurial.utils import procutil |
|
27 | 27 | |
|
28 | 28 | propertycache = util.propertycache |
|
29 | 29 | |
|
30 | 30 | |
|
31 | 31 | def _encodeornone(d): |
|
32 | 32 | if d is None: |
|
33 | 33 | return |
|
34 | 34 | return d.encode('latin1') |
|
35 | 35 | |
|
36 | 36 | |
|
37 | 37 | class _shlexpy3proxy(object): |
|
38 | 38 | def __init__(self, l): |
|
39 | 39 | self._l = l |
|
40 | 40 | |
|
41 | 41 | def __iter__(self): |
|
42 | 42 | return (_encodeornone(v) for v in self._l) |
|
43 | 43 | |
|
44 | 44 | def get_token(self): |
|
45 | 45 | return _encodeornone(self._l.get_token()) |
|
46 | 46 | |
|
47 | 47 | @property |
|
48 | 48 | def infile(self): |
|
49 | 49 | return self._l.infile or b'<unknown>' |
|
50 | 50 | |
|
51 | 51 | @property |
|
52 | 52 | def lineno(self): |
|
53 | 53 | return self._l.lineno |
|
54 | 54 | |
|
55 | 55 | |
|
56 | 56 | def shlexer(data=None, filepath=None, wordchars=None, whitespace=None): |
|
57 | 57 | if data is None: |
|
58 | 58 | if pycompat.ispy3: |
|
59 | 59 | data = open(filepath, b'r', encoding='latin1') |
|
60 | 60 | else: |
|
61 | 61 | data = open(filepath, b'r') |
|
62 | 62 | else: |
|
63 | 63 | if filepath is not None: |
|
64 | 64 | raise error.ProgrammingError( |
|
65 | 65 | b'shlexer only accepts data or filepath, not both' |
|
66 | 66 | ) |
|
67 | 67 | if pycompat.ispy3: |
|
68 | 68 | data = data.decode('latin1') |
|
69 | 69 | l = shlex.shlex(data, infile=filepath, posix=True) |
|
70 | 70 | if whitespace is not None: |
|
71 | 71 | l.whitespace_split = True |
|
72 | 72 | if pycompat.ispy3: |
|
73 | 73 | l.whitespace += whitespace.decode('latin1') |
|
74 | 74 | else: |
|
75 | 75 | l.whitespace += whitespace |
|
76 | 76 | if wordchars is not None: |
|
77 | 77 | if pycompat.ispy3: |
|
78 | 78 | l.wordchars += wordchars.decode('latin1') |
|
79 | 79 | else: |
|
80 | 80 | l.wordchars += wordchars |
|
81 | 81 | if pycompat.ispy3: |
|
82 | 82 | return _shlexpy3proxy(l) |
|
83 | 83 | return l |
|
84 | 84 | |
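A rough usage sketch, mirroring how parsesplicemap() in convcmd.py later in this series calls shlexer(): the helper tokenizes a byte string, optionally treating extra characters such as b',' as whitespace, and on Python 3 the proxy decodes/re-encodes via latin1 so callers keep working in bytes.

    lex = shlexer(data=b'child parent1,parent2', whitespace=b',')
    print(list(lex))   # [b'child', b'parent1', b'parent2']
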
|
85 | 85 | |
|
86 | 86 | if pycompat.ispy3: |
|
87 | 87 | base64_encodebytes = base64.encodebytes |
|
88 | 88 | base64_decodebytes = base64.decodebytes |
|
89 | 89 | else: |
|
90 | 90 | base64_encodebytes = base64.encodestring |
|
91 | 91 | base64_decodebytes = base64.decodestring |
|
92 | 92 | |
|
93 | 93 | |
|
94 | 94 | def encodeargs(args): |
|
95 | 95 | def encodearg(s): |
|
96 | 96 | lines = base64_encodebytes(s) |
|
97 | 97 | lines = [l.splitlines()[0] for l in pycompat.iterbytestr(lines)] |
|
98 | 98 | return b''.join(lines) |
|
99 | 99 | |
|
100 | 100 | s = pickle.dumps(args) |
|
101 | 101 | return encodearg(s) |
|
102 | 102 | |
|
103 | 103 | |
|
104 | 104 | def decodeargs(s): |
|
105 | 105 | s = base64_decodebytes(s) |
|
106 | 106 | return pickle.loads(s) |
|
107 | 107 | |
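A quick round-trip sketch (the payload value is an arbitrary example): encodeargs() pickles a Python value and base64-encodes it into a single line of bytes with no embedded newlines, safe to pass on a child process command line, and decodeargs() reverses it.

    payload = ([b'log', b'-r', b'tip'], {b'verbose': True})
    blob = encodeargs(payload)        # one base64 line
    assert decodeargs(blob) == payload
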
|
108 | 108 | |
|
109 | 109 | class MissingTool(Exception): |
|
110 | 110 | pass |
|
111 | 111 | |
|
112 | 112 | |
|
113 | 113 | def checktool(exe, name=None, abort=True): |
|
114 | 114 | name = name or exe |
|
115 | 115 | if not procutil.findexe(exe): |
|
116 | 116 | if abort: |
|
117 | 117 | exc = error.Abort |
|
118 | 118 | else: |
|
119 | 119 | exc = MissingTool |
|
120 | 120 | raise exc(_(b'cannot find required "%s" tool') % name) |
|
121 | 121 | |
|
122 | 122 | |
|
123 | 123 | class NoRepo(Exception): |
|
124 | 124 | pass |
|
125 | 125 | |
|
126 | 126 | |
|
127 | 127 | SKIPREV = b'SKIP' |
|
128 | 128 | |
|
129 | 129 | |
|
130 | 130 | class commit(object): |
|
131 | 131 | def __init__( |
|
132 | 132 | self, |
|
133 | 133 | author, |
|
134 | 134 | date, |
|
135 | 135 | desc, |
|
136 | 136 | parents, |
|
137 | 137 | branch=None, |
|
138 | 138 | rev=None, |
|
139 | 139 | extra=None, |
|
140 | 140 | sortkey=None, |
|
141 | 141 | saverev=True, |
|
142 | 142 | phase=phases.draft, |
|
143 | 143 | optparents=None, |
|
144 | 144 | ctx=None, |
|
145 | 145 | ): |
|
146 | 146 | self.author = author or b'unknown' |
|
147 | 147 | self.date = date or b'0 0' |
|
148 | 148 | self.desc = desc |
|
149 | 149 | self.parents = parents # will be converted and used as parents |
|
150 | 150 | self.optparents = optparents or [] # will be used if already converted |
|
151 | 151 | self.branch = branch |
|
152 | 152 | self.rev = rev |
|
153 | 153 | self.extra = extra or {} |
|
154 | 154 | self.sortkey = sortkey |
|
155 | 155 | self.saverev = saverev |
|
156 | 156 | self.phase = phase |
|
157 | 157 | self.ctx = ctx # for hg to hg conversions |
|
158 | 158 | |
|
159 | 159 | |
|
160 | 160 | class converter_source(object): |
|
161 | 161 | """Conversion source interface""" |
|
162 | 162 | |
|
163 | 163 | def __init__(self, ui, repotype, path=None, revs=None): |
|
164 | 164 | """Initialize conversion source (or raise NoRepo("message") |
|
165 | 165 | exception if path is not a valid repository)""" |
|
166 | 166 | self.ui = ui |
|
167 | 167 | self.path = path |
|
168 | 168 | self.revs = revs |
|
169 | 169 | self.repotype = repotype |
|
170 | 170 | |
|
171 | 171 | self.encoding = b'utf-8' |
|
172 | 172 | |
|
173 | 173 | def checkhexformat(self, revstr, mapname=b'splicemap'): |
|
174 | 174 | """fails if revstr is not a 40 byte hex. mercurial and git both uses |
|
175 | 175 | such format for their revision numbering |
|
176 | 176 | """ |
|
177 | 177 | if not re.match(br'[0-9a-fA-F]{40,40}$', revstr): |
|
178 | 178 | raise error.Abort( |
|
179 | 179 | _(b'%s entry %s is not a valid revision identifier') |
|
180 | 180 | % (mapname, revstr) |
|
181 | 181 | ) |
|
182 | 182 | |
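For illustration (src standing in for any converter_source instance), only a full 40-character hex node id passes the check:

    src.checkhexformat(b'0123456789abcdef0123456789abcdef01234567')  # ok
    src.checkhexformat(b'tip')   # raises error.Abort naming the map file
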
|
183 | 183 | def before(self): |
|
184 | 184 | pass |
|
185 | 185 | |
|
186 | 186 | def after(self): |
|
187 | 187 | pass |
|
188 | 188 | |
|
189 | 189 | def targetfilebelongstosource(self, targetfilename): |
|
190 | 190 | """Returns true if the given targetfile belongs to the source repo. This |
|
191 | 191 | is useful when only a subdirectory of the target belongs to the source |
|
192 | 192 | repo.""" |
|
193 | 193 | # For normal full repo converts, this is always True. |
|
194 | 194 | return True |
|
195 | 195 | |
|
196 | 196 | def setrevmap(self, revmap): |
|
197 | 197 | """set the map of already-converted revisions""" |
|
198 | 198 | |
|
199 | 199 | def getheads(self): |
|
200 | 200 | """Return a list of this repository's heads""" |
|
201 | 201 | raise NotImplementedError |
|
202 | 202 | |
|
203 | 203 | def getfile(self, name, rev): |
|
204 | 204 | """Return a pair (data, mode) where data is the file content |
|
205 | 205 | as a string and mode one of '', 'x' or 'l'. rev is the |
|
206 | 206 | identifier returned by a previous call to getchanges(). |
|
207 | 207 | Data is None if file is missing/deleted in rev. |
|
208 | 208 | """ |
|
209 | 209 | raise NotImplementedError |
|
210 | 210 | |
|
211 | 211 | def getchanges(self, version, full): |
|
212 | 212 | """Returns a tuple of (files, copies, cleanp2). |
|
213 | 213 | |
|
214 | 214 | files is a sorted list of (filename, id) tuples for all files |
|
215 | 215 | changed between version and its first parent returned by |
|
216 | 216 | getcommit(). If full, all files in that revision is returned. |
|
217 | 217 | id is the source revision id of the file. |
|
218 | 218 | |
|
219 | 219 | copies is a dictionary of dest: source |
|
220 | 220 | |
|
221 | 221 | cleanp2 is the set of files filenames that are clean against p2. |
|
222 | 222 | (Files that are clean against p1 are already not in files (unless |
|
223 | 223 | full). This makes it possible to handle p2 clean files similarly.) |
|
224 | 224 | """ |
|
225 | 225 | raise NotImplementedError |
|
226 | 226 | |
|
227 | 227 | def getcommit(self, version): |
|
228 | 228 | """Return the commit object for version""" |
|
229 | 229 | raise NotImplementedError |
|
230 | 230 | |
|
231 | 231 | def numcommits(self): |
|
232 | 232 | """Return the number of commits in this source. |
|
233 | 233 | |
|
234 | 234 | If unknown, return None. |
|
235 | 235 | """ |
|
236 | 236 | return None |
|
237 | 237 | |
|
238 | 238 | def gettags(self): |
|
239 | 239 | """Return the tags as a dictionary of name: revision |
|
240 | 240 | |
|
241 | 241 | Tag names must be UTF-8 strings. |
|
242 | 242 | """ |
|
243 | 243 | raise NotImplementedError |
|
244 | 244 | |
|
245 | 245 | def recode(self, s, encoding=None): |
|
246 | 246 | if not encoding: |
|
247 | 247 | encoding = self.encoding or b'utf-8' |
|
248 | 248 | |
|
249 | 249 | if isinstance(s, pycompat.unicode): |
|
250 | 250 | return s.encode("utf-8") |
|
251 | 251 | try: |
|
252 | 252 | return s.decode(pycompat.sysstr(encoding)).encode("utf-8") |
|
253 | 253 | except UnicodeError: |
|
254 | 254 | try: |
|
255 | 255 | return s.decode("latin-1").encode("utf-8") |
|
256 | 256 | except UnicodeError: |
|
257 | 257 | return s.decode(pycompat.sysstr(encoding), "replace").encode( |
|
258 | 258 | "utf-8" |
|
259 | 259 | ) |
|
260 | 260 | |
|
261 | 261 | def getchangedfiles(self, rev, i): |
|
262 | 262 | """Return the files changed by rev compared to parent[i]. |
|
263 | 263 | |
|
264 | 264 | i is an index selecting one of the parents of rev. The return |
|
265 | 265 | value should be the list of files that are different in rev and |
|
266 | 266 | this parent. |
|
267 | 267 | |
|
268 | 268 | If rev has no parents, i is None. |
|
269 | 269 | |
|
270 | 270 | This function is only needed to support --filemap |
|
271 | 271 | """ |
|
272 | 272 | raise NotImplementedError |
|
273 | 273 | |
|
274 | 274 | def converted(self, rev, sinkrev): |
|
275 | 275 | '''Notify the source that a revision has been converted.''' |
|
276 | 276 | |
|
277 | 277 | def hasnativeorder(self): |
|
278 | 278 | """Return true if this source has a meaningful, native revision |
|
279 | 279 | order. For instance, Mercurial revisions are stored sequentially 
|
280 | 280 | while there is no such global ordering with Darcs. |
|
281 | 281 | """ |
|
282 | 282 | return False |
|
283 | 283 | |
|
284 | 284 | def hasnativeclose(self): |
|
285 | 285 | """Return true if this source has ability to close branch.""" |
|
286 | 286 | return False |
|
287 | 287 | |
|
288 | 288 | def lookuprev(self, rev): |
|
289 | 289 | """If rev is a meaningful revision reference in source, return |
|
290 | 290 | the referenced identifier in the same format used by getcommit(). |
|
291 | 291 | return None otherwise. |
|
292 | 292 | """ |
|
293 | 293 | return None |
|
294 | 294 | |
|
295 | 295 | def getbookmarks(self): |
|
296 | 296 | """Return the bookmarks as a dictionary of name: revision |
|
297 | 297 | |
|
298 | 298 | Bookmark names are to be UTF-8 strings. |
|
299 | 299 | """ |
|
300 | 300 | return {} |
|
301 | 301 | |
|
302 | 302 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
303 | 303 | """revstr is a string that describes a revision in the given |
|
304 | 304 | source control system. Return true if revstr has correct |
|
305 | 305 | format. |
|
306 | 306 | """ |
|
307 | 307 | return True |
|
308 | 308 | |
|
309 | 309 | |
|
310 | 310 | class converter_sink(object): |
|
311 | 311 | """Conversion sink (target) interface""" |
|
312 | 312 | |
|
313 | 313 | def __init__(self, ui, repotype, path): |
|
314 | 314 | """Initialize conversion sink (or raise NoRepo("message") |
|
315 | 315 | exception if path is not a valid repository) |
|
316 | 316 | |
|
317 | 317 | created is a list of paths to remove if a fatal error occurs |
|
318 | 318 | later""" |
|
319 | 319 | self.ui = ui |
|
320 | 320 | self.path = path |
|
321 | 321 | self.created = [] |
|
322 | 322 | self.repotype = repotype |
|
323 | 323 | |
|
324 | 324 | def revmapfile(self): |
|
325 | 325 | """Path to a file that will contain lines |
|
326 | 326 | source_rev_id sink_rev_id |
|
327 | 327 | mapping equivalent revision identifiers for each system.""" |
|
328 | 328 | raise NotImplementedError |
|
329 | 329 | |
|
330 | 330 | def authorfile(self): |
|
331 | 331 | """Path to a file that will contain lines |
|
332 | 332 | srcauthor=dstauthor |
|
333 | 333 | mapping equivalent author identifiers for each system.""" 
|
334 | 334 | return None |
|
335 | 335 | |
|
336 | 336 | def putcommit( |
|
337 | 337 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
338 | 338 | ): |
|
339 | 339 | """Create a revision with all changed files listed in 'files' |
|
340 | 340 | and having listed parents. 'commit' is a commit object |
|
341 | 341 | containing at a minimum the author, date, and message for this |
|
342 | 342 | changeset. 'files' is a list of (path, version) tuples, |
|
343 | 343 | 'copies' is a dictionary mapping destinations to sources, |
|
344 | 344 | 'source' is the source repository, and 'revmap' is a mapfile |
|
345 | 345 | of source revisions to converted revisions. Only getfile() and |
|
346 | 346 | lookuprev() should be called on 'source'. 'full' means that 'files' |
|
347 | 347 | is complete and all other files should be removed. |
|
348 | 348 | 'cleanp2' is a set of the filenames that are unchanged from p2 |
|
349 | 349 | (only in the common merge case where there are two parents). 
|
350 | 350 | |
|
351 | 351 | Note that the sink repository is not told to update itself to |
|
352 | 352 | a particular revision (or even what that revision would be) |
|
353 | 353 | before it receives the file data. |
|
354 | 354 | """ |
|
355 | 355 | raise NotImplementedError |
|
356 | 356 | |
|
357 | 357 | def puttags(self, tags): |
|
358 | 358 | """Put tags into sink. |
|
359 | 359 | |
|
360 | 360 | tags: {tagname: sink_rev_id, ...} where tagname is a UTF-8 string. 
|
361 | 361 | Return a pair (tag_revision, tag_parent_revision), or (None, None) |
|
362 | 362 | if nothing was changed. |
|
363 | 363 | """ |
|
364 | 364 | raise NotImplementedError |
|
365 | 365 | |
|
366 | 366 | def setbranch(self, branch, pbranches): |
|
367 | 367 | """Set the current branch name. Called before the first putcommit |
|
368 | 368 | on the branch. |
|
369 | 369 | branch: branch name for subsequent commits |
|
370 | 370 | pbranches: (converted parent revision, parent branch) tuples""" |
|
371 | 371 | |
|
372 | 372 | def setfilemapmode(self, active): |
|
373 | 373 | """Tell the destination that we're using a filemap |
|
374 | 374 | |
|
375 | 375 | Some converter_sources (svn in particular) can claim that a file |
|
376 | 376 | was changed in a revision, even if there was no change. This method |
|
377 | 377 | tells the destination that we're using a filemap and that it should |
|
378 | 378 | filter empty revisions. |
|
379 | 379 | """ |
|
380 | 380 | |
|
381 | 381 | def before(self): |
|
382 | 382 | pass |
|
383 | 383 | |
|
384 | 384 | def after(self): |
|
385 | 385 | pass |
|
386 | 386 | |
|
387 | 387 | def putbookmarks(self, bookmarks): |
|
388 | 388 | """Put bookmarks into sink. |
|
389 | 389 | |
|
390 | 390 | bookmarks: {bookmarkname: sink_rev_id, ...} |
|
391 | 391 | where bookmarkname is a UTF-8 string. 
|
392 | 392 | """ |
|
393 | 393 | |
|
394 | 394 | def hascommitfrommap(self, rev): |
|
395 | 395 | """Return False if a rev mentioned in a filemap is known to not be |
|
396 | 396 | present.""" |
|
397 | 397 | raise NotImplementedError |
|
398 | 398 | |
|
399 | 399 | def hascommitforsplicemap(self, rev): |
|
400 | 400 | """This method is for the special needs for splicemap handling and not |
|
401 | 401 | for general use. Returns True if the sink contains rev, aborts on some |
|
402 | 402 | special cases.""" |
|
403 | 403 | raise NotImplementedError |
|
404 | 404 | |
|
405 | 405 | |
|
406 | 406 | class commandline(object): |
|
407 | 407 | def __init__(self, ui, command): |
|
408 | 408 | self.ui = ui |
|
409 | 409 | self.command = command |
|
410 | 410 | |
|
411 | 411 | def prerun(self): |
|
412 | 412 | pass |
|
413 | 413 | |
|
414 | 414 | def postrun(self): |
|
415 | 415 | pass |
|
416 | 416 | |
|
417 | 417 | def _cmdline(self, cmd, *args, **kwargs): |
|
418 | 418 | kwargs = pycompat.byteskwargs(kwargs) |
|
419 | 419 | cmdline = [self.command, cmd] + list(args) |
|
420 | for k, v in pycompat.iteritems(kwargs): | 

420 | for k, v in kwargs.items(): | 
|
421 | 421 | if len(k) == 1: |
|
422 | 422 | cmdline.append(b'-' + k) |
|
423 | 423 | else: |
|
424 | 424 | cmdline.append(b'--' + k.replace(b'_', b'-')) |
|
425 | 425 | try: |
|
426 | 426 | if len(k) == 1: |
|
427 | 427 | cmdline.append(b'' + v) |
|
428 | 428 | else: |
|
429 | 429 | cmdline[-1] += b'=' + v |
|
430 | 430 | except TypeError: |
|
431 | 431 | pass |
|
432 | 432 | cmdline = [procutil.shellquote(arg) for arg in cmdline] |
|
433 | 433 | if not self.ui.debugflag: |
|
434 | 434 | cmdline += [b'2>', pycompat.bytestr(os.devnull)] |
|
435 | 435 | cmdline = b' '.join(cmdline) |
|
436 | 436 | return cmdline |
|
437 | 437 | |
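A sketch of the kwargs-to-flags translation performed above (the svn command and arguments are hypothetical): one-letter keys become separate '-k value' arguments, longer keys become '--key=value' with underscores mapped to dashes, each piece is shell-quoted, and stderr is discarded unless --debug is active.

    cmd = commandline(ui, b'svn')        # ui is assumed to exist
    cmd._cmdline(b'log', b'trunk', r=b'1:10')
    # -> roughly b"svn log trunk -r 1:10 2> /dev/null"
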
|
438 | 438 | def _run(self, cmd, *args, **kwargs): |
|
439 | 439 | def popen(cmdline): |
|
440 | 440 | p = subprocess.Popen( |
|
441 | 441 | procutil.tonativestr(cmdline), |
|
442 | 442 | shell=True, |
|
443 | 443 | bufsize=-1, |
|
444 | 444 | close_fds=procutil.closefds, |
|
445 | 445 | stdout=subprocess.PIPE, |
|
446 | 446 | ) |
|
447 | 447 | return p |
|
448 | 448 | |
|
449 | 449 | return self._dorun(popen, cmd, *args, **kwargs) |
|
450 | 450 | |
|
451 | 451 | def _run2(self, cmd, *args, **kwargs): |
|
452 | 452 | return self._dorun(procutil.popen2, cmd, *args, **kwargs) |
|
453 | 453 | |
|
454 | 454 | def _run3(self, cmd, *args, **kwargs): |
|
455 | 455 | return self._dorun(procutil.popen3, cmd, *args, **kwargs) |
|
456 | 456 | |
|
457 | 457 | def _dorun(self, openfunc, cmd, *args, **kwargs): |
|
458 | 458 | cmdline = self._cmdline(cmd, *args, **kwargs) |
|
459 | 459 | self.ui.debug(b'running: %s\n' % (cmdline,)) |
|
460 | 460 | self.prerun() |
|
461 | 461 | try: |
|
462 | 462 | return openfunc(cmdline) |
|
463 | 463 | finally: |
|
464 | 464 | self.postrun() |
|
465 | 465 | |
|
466 | 466 | def run(self, cmd, *args, **kwargs): |
|
467 | 467 | p = self._run(cmd, *args, **kwargs) |
|
468 | 468 | output = p.communicate()[0] |
|
469 | 469 | self.ui.debug(output) |
|
470 | 470 | return output, p.returncode |
|
471 | 471 | |
|
472 | 472 | def runlines(self, cmd, *args, **kwargs): |
|
473 | 473 | p = self._run(cmd, *args, **kwargs) |
|
474 | 474 | output = p.stdout.readlines() |
|
475 | 475 | p.wait() |
|
476 | 476 | self.ui.debug(b''.join(output)) |
|
477 | 477 | return output, p.returncode |
|
478 | 478 | |
|
479 | 479 | def checkexit(self, status, output=b''): |
|
480 | 480 | if status: |
|
481 | 481 | if output: |
|
482 | 482 | self.ui.warn(_(b'%s error:\n') % self.command) |
|
483 | 483 | self.ui.warn(output) |
|
484 | 484 | msg = procutil.explainexit(status) |
|
485 | 485 | raise error.Abort(b'%s %s' % (self.command, msg)) |
|
486 | 486 | |
|
487 | 487 | def run0(self, cmd, *args, **kwargs): |
|
488 | 488 | output, status = self.run(cmd, *args, **kwargs) |
|
489 | 489 | self.checkexit(status, output) |
|
490 | 490 | return output |
|
491 | 491 | |
|
492 | 492 | def runlines0(self, cmd, *args, **kwargs): |
|
493 | 493 | output, status = self.runlines(cmd, *args, **kwargs) |
|
494 | 494 | self.checkexit(status, b''.join(output)) |
|
495 | 495 | return output |
|
496 | 496 | |
|
497 | 497 | @propertycache |
|
498 | 498 | def argmax(self): |
|
499 | 499 | # POSIX requires at least 4096 bytes for ARG_MAX |
|
500 | 500 | argmax = 4096 |
|
501 | 501 | try: |
|
502 | 502 | argmax = os.sysconf("SC_ARG_MAX") |
|
503 | 503 | except (AttributeError, ValueError): |
|
504 | 504 | pass |
|
505 | 505 | |
|
506 | 506 | # Windows shells impose their own limits on command line length, |
|
507 | 507 | # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes |
|
508 | 508 | # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for |
|
509 | 509 | # details about cmd.exe limitations. |
|
510 | 510 | |
|
511 | 511 | # Since ARG_MAX is for command line _and_ environment, lower our limit |
|
512 | 512 | # (and make happy Windows shells while doing this). |
|
513 | 513 | return argmax // 2 - 1 |
|
514 | 514 | |
|
515 | 515 | def _limit_arglist(self, arglist, cmd, *args, **kwargs): |
|
516 | 516 | cmdlen = len(self._cmdline(cmd, *args, **kwargs)) |
|
517 | 517 | limit = self.argmax - cmdlen |
|
518 | 518 | numbytes = 0 |
|
519 | 519 | fl = [] |
|
520 | 520 | for fn in arglist: |
|
521 | 521 | b = len(fn) + 3 |
|
522 | 522 | if numbytes + b < limit or len(fl) == 0: |
|
523 | 523 | fl.append(fn) |
|
524 | 524 | numbytes += b |
|
525 | 525 | else: |
|
526 | 526 | yield fl |
|
527 | 527 | fl = [fn] |
|
528 | 528 | numbytes = b |
|
529 | 529 | if fl: |
|
530 | 530 | yield fl |
|
531 | 531 | |
|
532 | 532 | def xargs(self, arglist, cmd, *args, **kwargs): |
|
533 | 533 | for l in self._limit_arglist(arglist, cmd, *args, **kwargs): |
|
534 | 534 | self.run0(cmd, *(list(args) + l), **kwargs) |
|
535 | 535 | |
|
536 | 536 | |
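A sketch of the chunking behaviour (the command and file names are hypothetical): _limit_arglist() yields successive slices of the argument list whose combined length stays within the ARG_MAX-derived budget, and xargs() runs the command once per slice, much like the xargs(1) utility.

    cmd = commandline(ui, b'cvs')
    files = [b'src/file%04d.c' % i for i in range(20000)]
    cmd.xargs(files, b'log')   # several 'cvs log <chunk>' invocations
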
|
537 | 537 | class mapfile(dict): |
|
538 | 538 | def __init__(self, ui, path): |
|
539 | 539 | super(mapfile, self).__init__() |
|
540 | 540 | self.ui = ui |
|
541 | 541 | self.path = path |
|
542 | 542 | self.fp = None |
|
543 | 543 | self.order = [] |
|
544 | 544 | self._read() |
|
545 | 545 | |
|
546 | 546 | def _read(self): |
|
547 | 547 | if not self.path: |
|
548 | 548 | return |
|
549 | 549 | try: |
|
550 | 550 | fp = open(self.path, b'rb') |
|
551 | 551 | except IOError as err: |
|
552 | 552 | if err.errno != errno.ENOENT: |
|
553 | 553 | raise |
|
554 | 554 | return |
|
555 | 555 | for i, line in enumerate(util.iterfile(fp)): |
|
556 | 556 | line = line.splitlines()[0].rstrip() |
|
557 | 557 | if not line: |
|
558 | 558 | # Ignore blank lines |
|
559 | 559 | continue |
|
560 | 560 | try: |
|
561 | 561 | key, value = line.rsplit(b' ', 1) |
|
562 | 562 | except ValueError: |
|
563 | 563 | raise error.Abort( |
|
564 | 564 | _(b'syntax error in %s(%d): key/value pair expected') |
|
565 | 565 | % (self.path, i + 1) |
|
566 | 566 | ) |
|
567 | 567 | if key not in self: |
|
568 | 568 | self.order.append(key) |
|
569 | 569 | super(mapfile, self).__setitem__(key, value) |
|
570 | 570 | fp.close() |
|
571 | 571 | |
|
572 | 572 | def __setitem__(self, key, value): |
|
573 | 573 | if self.fp is None: |
|
574 | 574 | try: |
|
575 | 575 | self.fp = open(self.path, b'ab') |
|
576 | 576 | except IOError as err: |
|
577 | 577 | raise error.Abort( |
|
578 | 578 | _(b'could not open map file %r: %s') |
|
579 | 579 | % (self.path, encoding.strtolocal(err.strerror)) |
|
580 | 580 | ) |
|
581 | 581 | self.fp.write(util.tonativeeol(b'%s %s\n' % (key, value))) |
|
582 | 582 | self.fp.flush() |
|
583 | 583 | super(mapfile, self).__setitem__(key, value) |
|
584 | 584 | |
|
585 | 585 | def close(self): |
|
586 | 586 | if self.fp: |
|
587 | 587 | self.fp.close() |
|
588 | 588 | self.fp = None |
|
589 | 589 | |
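Usage sketch (the path is hypothetical): mapfile behaves like a dict backed by an append-only text file of 'key value' lines; every assignment is written and flushed immediately, which is what lets an interrupted conversion resume from its revision map.

    m = mapfile(ui, b'/tmp/revmap')
    m[b'<source-rev>'] = b'<sink-rev>'   # appended to the file at once
    m.close()
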
|
590 | 590 | |
|
591 | 591 | def makedatetimestamp(t): |
|
592 | 592 | """Like dateutil.makedate() but for time t instead of current time""" |
|
593 | 593 | delta = datetime.datetime.utcfromtimestamp( |
|
594 | 594 | t |
|
595 | 595 | ) - datetime.datetime.fromtimestamp(t) |
|
596 | 596 | tz = delta.days * 86400 + delta.seconds |
|
597 | 597 | return t, tz |
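A small worked example of the offset computation above, assuming the process runs in a fixed UTC+2 zone:

    t, tz = makedatetimestamp(0)
    # utcfromtimestamp(0) is 00:00 while fromtimestamp(0) is 02:00
    # local, so delta is -2h and tz == -7200 (seconds west of UTC,
    # Mercurial's date convention); in UTC itself, tz == 0.
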
@@ -1,669 +1,667 b'' | |||
|
1 | 1 | # convcmd - convert extension commands definition |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import collections |
|
9 | 9 | import os |
|
10 | 10 | import shutil |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | from mercurial.pycompat import open |
|
14 | 14 | from mercurial import ( |
|
15 | 15 | encoding, |
|
16 | 16 | error, |
|
17 | 17 | hg, |
|
18 | 18 | pycompat, |
|
19 | 19 | scmutil, |
|
20 | 20 | util, |
|
21 | 21 | ) |
|
22 | 22 | from mercurial.utils import dateutil |
|
23 | 23 | |
|
24 | 24 | from . import ( |
|
25 | 25 | bzr, |
|
26 | 26 | common, |
|
27 | 27 | cvs, |
|
28 | 28 | darcs, |
|
29 | 29 | filemap, |
|
30 | 30 | git, |
|
31 | 31 | gnuarch, |
|
32 | 32 | hg as hgconvert, |
|
33 | 33 | monotone, |
|
34 | 34 | p4, |
|
35 | 35 | subversion, |
|
36 | 36 | ) |
|
37 | 37 | |
|
38 | 38 | mapfile = common.mapfile |
|
39 | 39 | MissingTool = common.MissingTool |
|
40 | 40 | NoRepo = common.NoRepo |
|
41 | 41 | SKIPREV = common.SKIPREV |
|
42 | 42 | |
|
43 | 43 | bzr_source = bzr.bzr_source |
|
44 | 44 | convert_cvs = cvs.convert_cvs |
|
45 | 45 | convert_git = git.convert_git |
|
46 | 46 | darcs_source = darcs.darcs_source |
|
47 | 47 | gnuarch_source = gnuarch.gnuarch_source |
|
48 | 48 | mercurial_sink = hgconvert.mercurial_sink |
|
49 | 49 | mercurial_source = hgconvert.mercurial_source |
|
50 | 50 | monotone_source = monotone.monotone_source |
|
51 | 51 | p4_source = p4.p4_source |
|
52 | 52 | svn_sink = subversion.svn_sink |
|
53 | 53 | svn_source = subversion.svn_source |
|
54 | 54 | |
|
55 | 55 | orig_encoding = b'ascii' |
|
56 | 56 | |
|
57 | 57 | |
|
58 | 58 | def readauthormap(ui, authorfile, authors=None): |
|
59 | 59 | if authors is None: |
|
60 | 60 | authors = {} |
|
61 | 61 | with open(authorfile, b'rb') as afile: |
|
62 | 62 | for line in afile: |
|
63 | 63 | |
|
64 | 64 | line = line.strip() |
|
65 | 65 | if not line or line.startswith(b'#'): |
|
66 | 66 | continue |
|
67 | 67 | |
|
68 | 68 | try: |
|
69 | 69 | srcauthor, dstauthor = line.split(b'=', 1) |
|
70 | 70 | except ValueError: |
|
71 | 71 | msg = _(b'ignoring bad line in author map file %s: %s\n') |
|
72 | 72 | ui.warn(msg % (authorfile, line.rstrip())) |
|
73 | 73 | continue |
|
74 | 74 | |
|
75 | 75 | srcauthor = srcauthor.strip() |
|
76 | 76 | dstauthor = dstauthor.strip() |
|
77 | 77 | if authors.get(srcauthor) in (None, dstauthor): |
|
78 | 78 | msg = _(b'mapping author %s to %s\n') |
|
79 | 79 | ui.debug(msg % (srcauthor, dstauthor)) |
|
80 | 80 | authors[srcauthor] = dstauthor |
|
81 | 81 | continue |
|
82 | 82 | |
|
83 | 83 | m = _(b'overriding mapping for author %s, was %s, will be %s\n') |
|
84 | 84 | ui.status(m % (srcauthor, authors[srcauthor], dstauthor)) |
|
85 | 85 | return authors |
|
86 | 86 | |
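A sketch of the expected file format (the file name is hypothetical): one 'src=dst' mapping per line, with blank lines and '#' comments ignored; a later line for the same source author overrides the earlier one and emits a status message.

    # authors.txt:
    #   jdoe=John Doe <jdoe@example.com>
    #   build=Build Robot <build@example.com>
    authors = readauthormap(ui, b'authors.txt')
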
|
87 | 87 | |
|
88 | 88 | def recode(s): |
|
89 | 89 | if isinstance(s, pycompat.unicode): |
|
90 | 90 | return s.encode(pycompat.sysstr(orig_encoding), 'replace') |
|
91 | 91 | else: |
|
92 | 92 | return s.decode('utf-8').encode( |
|
93 | 93 | pycompat.sysstr(orig_encoding), 'replace' |
|
94 | 94 | ) |
|
95 | 95 | |
|
96 | 96 | |
|
97 | 97 | def mapbranch(branch, branchmap): |
|
98 | 98 | """ |
|
99 | 99 | >>> bmap = {b'default': b'branch1'} |
|
100 | 100 | >>> for i in [b'', None]: |
|
101 | 101 | ... mapbranch(i, bmap) |
|
102 | 102 | 'branch1' |
|
103 | 103 | 'branch1' |
|
104 | 104 | >>> bmap = {b'None': b'branch2'} |
|
105 | 105 | >>> for i in [b'', None]: |
|
106 | 106 | ... mapbranch(i, bmap) |
|
107 | 107 | 'branch2' |
|
108 | 108 | 'branch2' |
|
109 | 109 | >>> bmap = {b'None': b'branch3', b'default': b'branch4'} |
|
110 | 110 | >>> for i in [b'None', b'', None, b'default', b'branch5']: |
|
111 | 111 | ... mapbranch(i, bmap) |
|
112 | 112 | 'branch3' |
|
113 | 113 | 'branch4' |
|
114 | 114 | 'branch4' |
|
115 | 115 | 'branch4' |
|
116 | 116 | 'branch5' |
|
117 | 117 | """ |
|
118 | 118 | # If branch is None or empty, this commit is coming from the source |
|
119 | 119 | # repository's default branch and destined for the default branch in the |
|
120 | 120 | # destination repository. For such commits, using a literal "default" |
|
121 | 121 | # in branchmap below allows the user to map "default" to an alternate |
|
122 | 122 | # default branch in the destination repository. |
|
123 | 123 | branch = branchmap.get(branch or b'default', branch) |
|
124 | 124 | # At some point we used "None" literal to denote the default branch, |
|
125 | 125 | # attempt to use that for backward compatibility. |
|
126 | 126 | if not branch: |
|
127 | 127 | branch = branchmap.get(b'None', branch) |
|
128 | 128 | return branch |
|
129 | 129 | |
|
130 | 130 | |
|
131 | 131 | source_converters = [ |
|
132 | 132 | (b'cvs', convert_cvs, b'branchsort'), |
|
133 | 133 | (b'git', convert_git, b'branchsort'), |
|
134 | 134 | (b'svn', svn_source, b'branchsort'), |
|
135 | 135 | (b'hg', mercurial_source, b'sourcesort'), |
|
136 | 136 | (b'darcs', darcs_source, b'branchsort'), |
|
137 | 137 | (b'mtn', monotone_source, b'branchsort'), |
|
138 | 138 | (b'gnuarch', gnuarch_source, b'branchsort'), |
|
139 | 139 | (b'bzr', bzr_source, b'branchsort'), |
|
140 | 140 | (b'p4', p4_source, b'branchsort'), |
|
141 | 141 | ] |
|
142 | 142 | |
|
143 | 143 | sink_converters = [ |
|
144 | 144 | (b'hg', mercurial_sink), |
|
145 | 145 | (b'svn', svn_sink), |
|
146 | 146 | ] |
|
147 | 147 | |
|
148 | 148 | |
|
149 | 149 | def convertsource(ui, path, type, revs): |
|
150 | 150 | exceptions = [] |
|
151 | 151 | if type and type not in [s[0] for s in source_converters]: |
|
152 | 152 | raise error.Abort(_(b'%s: invalid source repository type') % type) |
|
153 | 153 | for name, source, sortmode in source_converters: |
|
154 | 154 | try: |
|
155 | 155 | if not type or name == type: |
|
156 | 156 | return source(ui, name, path, revs), sortmode |
|
157 | 157 | except (NoRepo, MissingTool) as inst: |
|
158 | 158 | exceptions.append(inst) |
|
159 | 159 | if not ui.quiet: |
|
160 | 160 | for inst in exceptions: |
|
161 | 161 | ui.write(b"%s\n" % pycompat.bytestr(inst.args[0])) |
|
162 | 162 | raise error.Abort(_(b'%s: missing or unsupported repository') % path) |
|
163 | 163 | |
|
164 | 164 | |
|
165 | 165 | def convertsink(ui, path, type): |
|
166 | 166 | if type and type not in [s[0] for s in sink_converters]: |
|
167 | 167 | raise error.Abort(_(b'%s: invalid destination repository type') % type) |
|
168 | 168 | for name, sink in sink_converters: |
|
169 | 169 | try: |
|
170 | 170 | if not type or name == type: |
|
171 | 171 | return sink(ui, name, path) |
|
172 | 172 | except NoRepo as inst: |
|
173 | 173 | ui.note(_(b"convert: %s\n") % inst) |
|
174 | 174 | except MissingTool as inst: |
|
175 | 175 | raise error.Abort(b'%s\n' % inst) |
|
176 | 176 | raise error.Abort(_(b'%s: unknown repository type') % path) |
|
177 | 177 | |
|
178 | 178 | |
|
179 | 179 | class progresssource(object): |
|
180 | 180 | def __init__(self, ui, source, filecount): |
|
181 | 181 | self.ui = ui |
|
182 | 182 | self.source = source |
|
183 | 183 | self.progress = ui.makeprogress( |
|
184 | 184 | _(b'getting files'), unit=_(b'files'), total=filecount |
|
185 | 185 | ) |
|
186 | 186 | |
|
187 | 187 | def getfile(self, file, rev): |
|
188 | 188 | self.progress.increment(item=file) |
|
189 | 189 | return self.source.getfile(file, rev) |
|
190 | 190 | |
|
191 | 191 | def targetfilebelongstosource(self, targetfilename): |
|
192 | 192 | return self.source.targetfilebelongstosource(targetfilename) |
|
193 | 193 | |
|
194 | 194 | def lookuprev(self, rev): |
|
195 | 195 | return self.source.lookuprev(rev) |
|
196 | 196 | |
|
197 | 197 | def close(self): |
|
198 | 198 | self.progress.complete() |
|
199 | 199 | |
|
200 | 200 | |
|
201 | 201 | class converter(object): |
|
202 | 202 | def __init__(self, ui, source, dest, revmapfile, opts): |
|
203 | 203 | |
|
204 | 204 | self.source = source |
|
205 | 205 | self.dest = dest |
|
206 | 206 | self.ui = ui |
|
207 | 207 | self.opts = opts |
|
208 | 208 | self.commitcache = {} |
|
209 | 209 | self.authors = {} |
|
210 | 210 | self.authorfile = None |
|
211 | 211 | |
|
212 | 212 | # Record converted revisions persistently: maps source revision |
|
213 | 213 | # ID to target revision ID (both strings). (This is how |
|
214 | 214 | # incremental conversions work.) |
|
215 | 215 | self.map = mapfile(ui, revmapfile) |
|
216 | 216 | |
|
217 | 217 | # Read first the dst author map if any |
|
218 | 218 | authorfile = self.dest.authorfile() |
|
219 | 219 | if authorfile and os.path.exists(authorfile): |
|
220 | 220 | self.readauthormap(authorfile) |
|
221 | 221 | # Extend/Override with new author map if necessary |
|
222 | 222 | if opts.get(b'authormap'): |
|
223 | 223 | self.readauthormap(opts.get(b'authormap')) |
|
224 | 224 | self.authorfile = self.dest.authorfile() |
|
225 | 225 | |
|
226 | 226 | self.splicemap = self.parsesplicemap(opts.get(b'splicemap')) |
|
227 | 227 | self.branchmap = mapfile(ui, opts.get(b'branchmap')) |
|
228 | 228 | |
|
229 | 229 | def parsesplicemap(self, path): |
|
230 | 230 | """check and validate the splicemap format and |
|
231 | 231 | return a child/parents dictionary. |
|
232 | 232 | Format checking has two parts. |
|
233 | 233 | 1. generic format which is same across all source types |
|
234 | 234 | 2. specific format checking which may be different for |
|
235 | 235 | different source type. This logic is implemented in |
|
236 | 236 | checkrevformat function in source files like |
|
237 | 237 | hg.py, subversion.py etc. |
|
238 | 238 | """ |
|
239 | 239 | |
|
240 | 240 | if not path: |
|
241 | 241 | return {} |
|
242 | 242 | m = {} |
|
243 | 243 | try: |
|
244 | 244 | fp = open(path, b'rb') |
|
245 | 245 | for i, line in enumerate(util.iterfile(fp)): |
|
246 | 246 | line = line.splitlines()[0].rstrip() |
|
247 | 247 | if not line: |
|
248 | 248 | # Ignore blank lines |
|
249 | 249 | continue |
|
250 | 250 | # split line |
|
251 | 251 | lex = common.shlexer(data=line, whitespace=b',') |
|
252 | 252 | line = list(lex) |
|
253 | 253 | # check number of parents |
|
254 | 254 | if not (2 <= len(line) <= 3): |
|
255 | 255 | raise error.Abort( |
|
256 | 256 | _( |
|
257 | 257 | b'syntax error in %s(%d): child parent1' |
|
258 | 258 | b'[,parent2] expected' |
|
259 | 259 | ) |
|
260 | 260 | % (path, i + 1) |
|
261 | 261 | ) |
|
262 | 262 | for part in line: |
|
263 | 263 | self.source.checkrevformat(part) |
|
264 | 264 | child, p1, p2 = line[0], line[1:2], line[2:] |
|
265 | 265 | if p1 == p2: |
|
266 | 266 | m[child] = p1 |
|
267 | 267 | else: |
|
268 | 268 | m[child] = p1 + p2 |
|
269 | 269 | # if file does not exist or error reading, exit |
|
270 | 270 | except IOError: |
|
271 | 271 | raise error.Abort( |
|
272 | 272 | _(b'splicemap file not found or error reading %s:') % path |
|
273 | 273 | ) |
|
274 | 274 | return m |
|
275 | 275 | |
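For reference, a splicemap file (name hypothetical, c a converter instance) holds one 'child parent1[,parent2]' line per rewritten changeset, each identifier in the source system's own revision format; the result maps each child to its new parent list.

    # splicemap.txt:
    #   <child-rev> <parent1-rev>,<parent2-rev>
    m = c.parsesplicemap(b'splicemap.txt')   # {child: [p1, p2]}
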
|
276 | 276 | def walktree(self, heads): |
|
277 | 277 | """Return a mapping that identifies the uncommitted parents of every |
|
278 | 278 | uncommitted changeset.""" |
|
279 | 279 | visit = list(heads) |
|
280 | 280 | known = set() |
|
281 | 281 | parents = {} |
|
282 | 282 | numcommits = self.source.numcommits() |
|
283 | 283 | progress = self.ui.makeprogress( |
|
284 | 284 | _(b'scanning'), unit=_(b'revisions'), total=numcommits |
|
285 | 285 | ) |
|
286 | 286 | while visit: |
|
287 | 287 | n = visit.pop(0) |
|
288 | 288 | if n in known: |
|
289 | 289 | continue |
|
290 | 290 | if n in self.map: |
|
291 | 291 | m = self.map[n] |
|
292 | 292 | if m == SKIPREV or self.dest.hascommitfrommap(m): |
|
293 | 293 | continue |
|
294 | 294 | known.add(n) |
|
295 | 295 | progress.update(len(known)) |
|
296 | 296 | commit = self.cachecommit(n) |
|
297 | 297 | parents[n] = [] |
|
298 | 298 | for p in commit.parents: |
|
299 | 299 | parents[n].append(p) |
|
300 | 300 | visit.append(p) |
|
301 | 301 | progress.complete() |
|
302 | 302 | |
|
303 | 303 | return parents |
|
304 | 304 | |
|
305 | 305 | def mergesplicemap(self, parents, splicemap): |
|
306 | 306 | """A splicemap redefines child/parent relationships. Check the |
|
307 | 307 | map contains valid revision identifiers and merge the new |
|
308 | 308 | links in the source graph. |
|
309 | 309 | """ |
|
310 | 310 | for c in sorted(splicemap): |
|
311 | 311 | if c not in parents: |
|
312 | 312 | if not self.dest.hascommitforsplicemap(self.map.get(c, c)): |
|
313 | 313 | # Could be in source but not converted during this run |
|
314 | 314 | self.ui.warn( |
|
315 | 315 | _( |
|
316 | 316 | b'splice map revision %s is not being ' |
|
317 | 317 | b'converted, ignoring\n' |
|
318 | 318 | ) |
|
319 | 319 | % c |
|
320 | 320 | ) |
|
321 | 321 | continue |
|
322 | 322 | pc = [] |
|
323 | 323 | for p in splicemap[c]: |
|
324 | 324 | # We do not have to wait for nodes already in dest. |
|
325 | 325 | if self.dest.hascommitforsplicemap(self.map.get(p, p)): |
|
326 | 326 | continue |
|
327 | 327 | # Parent is not in dest and not being converted, not good |
|
328 | 328 | if p not in parents: |
|
329 | 329 | raise error.Abort(_(b'unknown splice map parent: %s') % p) |
|
330 | 330 | pc.append(p) |
|
331 | 331 | parents[c] = pc |
|
332 | 332 | |
|
333 | 333 | def toposort(self, parents, sortmode): |
|
334 | 334 | """Return an ordering such that every uncommitted changeset is |
|
335 | 335 | preceded by all its uncommitted ancestors.""" |
|
336 | 336 | |
|
337 | 337 | def mapchildren(parents): |
|
338 | 338 | """Return a (children, roots) tuple where 'children' maps parent |
|
339 | 339 | revision identifiers to children ones, and 'roots' is the list of |
|
340 | 340 | revisions without parents. 'parents' must be a mapping of revision |
|
341 | 341 | identifier to its parents ones. |
|
342 | 342 | """ |
|
343 | 343 | visit = collections.deque(sorted(parents)) |
|
344 | 344 | seen = set() |
|
345 | 345 | children = {} |
|
346 | 346 | roots = [] |
|
347 | 347 | |
|
348 | 348 | while visit: |
|
349 | 349 | n = visit.popleft() |
|
350 | 350 | if n in seen: |
|
351 | 351 | continue |
|
352 | 352 | seen.add(n) |
|
353 | 353 | # Ensure that nodes without parents are present in the |
|
354 | 354 | # 'children' mapping. |
|
355 | 355 | children.setdefault(n, []) |
|
356 | 356 | hasparent = False |
|
357 | 357 | for p in parents[n]: |
|
358 | 358 | if p not in self.map: |
|
359 | 359 | visit.append(p) |
|
360 | 360 | hasparent = True |
|
361 | 361 | children.setdefault(p, []).append(n) |
|
362 | 362 | if not hasparent: |
|
363 | 363 | roots.append(n) |
|
364 | 364 | |
|
365 | 365 | return children, roots |
|
366 | 366 | |
|
367 | 367 | # Sort functions are supposed to take a list of revisions which |
|
368 | 368 | # can be converted immediately and pick one |
|
369 | 369 | |
|
370 | 370 | def makebranchsorter(): |
|
371 | 371 | """If the previously converted revision has a child in the |
|
372 | 372 | eligible revisions list, pick it. Return the list head |
|
373 | 373 | otherwise. Branch sort attempts to minimize branch |
|
374 | 374 | switching, which is harmful for Mercurial backend |
|
375 | 375 | compression. |
|
376 | 376 | """ |
|
377 | 377 | prev = [None] |
|
378 | 378 | |
|
379 | 379 | def picknext(nodes): |
|
380 | 380 | next = nodes[0] |
|
381 | 381 | for n in nodes: |
|
382 | 382 | if prev[0] in parents[n]: |
|
383 | 383 | next = n |
|
384 | 384 | break |
|
385 | 385 | prev[0] = next |
|
386 | 386 | return next |
|
387 | 387 | |
|
388 | 388 | return picknext |
|
389 | 389 | |
|
390 | 390 | def makesourcesorter(): |
|
391 | 391 | """Source specific sort.""" |
|
392 | 392 | keyfn = lambda n: self.commitcache[n].sortkey |
|
393 | 393 | |
|
394 | 394 | def picknext(nodes): |
|
395 | 395 | return sorted(nodes, key=keyfn)[0] |
|
396 | 396 | |
|
397 | 397 | return picknext |
|
398 | 398 | |
|
399 | 399 | def makeclosesorter(): |
|
400 | 400 | """Close order sort.""" |
|
401 | 401 | keyfn = lambda n: ( |
|
402 | 402 | b'close' not in self.commitcache[n].extra, |
|
403 | 403 | self.commitcache[n].sortkey, |
|
404 | 404 | ) |
|
405 | 405 | |
|
406 | 406 | def picknext(nodes): |
|
407 | 407 | return sorted(nodes, key=keyfn)[0] |
|
408 | 408 | |
|
409 | 409 | return picknext |
|
410 | 410 | |
|
411 | 411 | def makedatesorter(): |
|
412 | 412 | """Sort revisions by date.""" |
|
413 | 413 | dates = {} |
|
414 | 414 | |
|
415 | 415 | def getdate(n): |
|
416 | 416 | if n not in dates: |
|
417 | 417 | dates[n] = dateutil.parsedate(self.commitcache[n].date) |
|
418 | 418 | return dates[n] |
|
419 | 419 | |
|
420 | 420 | def picknext(nodes): |
|
421 | 421 | return min([(getdate(n), n) for n in nodes])[1] |
|
422 | 422 | |
|
423 | 423 | return picknext |
|
424 | 424 | |
|
425 | 425 | if sortmode == b'branchsort': |
|
426 | 426 | picknext = makebranchsorter() |
|
427 | 427 | elif sortmode == b'datesort': |
|
428 | 428 | picknext = makedatesorter() |
|
429 | 429 | elif sortmode == b'sourcesort': |
|
430 | 430 | picknext = makesourcesorter() |
|
431 | 431 | elif sortmode == b'closesort': |
|
432 | 432 | picknext = makeclosesorter() |
|
433 | 433 | else: |
|
434 | 434 | raise error.Abort(_(b'unknown sort mode: %s') % sortmode) |
|
435 | 435 | |
|
436 | 436 | children, actives = mapchildren(parents) |
|
437 | 437 | |
|
438 | 438 | s = [] |
|
439 | 439 | pendings = {} |
|
440 | 440 | while actives: |
|
441 | 441 | n = picknext(actives) |
|
442 | 442 | actives.remove(n) |
|
443 | 443 | s.append(n) |
|
444 | 444 | |
|
445 | 445 | # Update dependents list |
|
446 | 446 | for c in children.get(n, []): |
|
447 | 447 | if c not in pendings: |
|
448 | 448 | pendings[c] = [p for p in parents[c] if p not in self.map] |
|
449 | 449 | try: |
|
450 | 450 | pendings[c].remove(n) |
|
451 | 451 | except ValueError: |
|
452 | 452 | raise error.Abort( |
|
453 | 453 | _(b'cycle detected between %s and %s') |
|
454 | 454 | % (recode(c), recode(n)) |
|
455 | 455 | ) |
|
456 | 456 | if not pendings[c]: |
|
457 | 457 | # Parents are converted, node is eligible |
|
458 | 458 | actives.insert(0, c) |
|
459 | 459 | pendings[c] = None |
|
460 | 460 | |
|
461 | 461 | if len(s) != len(parents): |
|
462 | 462 | raise error.Abort(_(b"not all revisions were sorted")) |
|
463 | 463 | |
|
464 | 464 | return s |
|
465 | 465 | |
|
466 | 466 | def writeauthormap(self): |
|
467 | 467 | authorfile = self.authorfile |
|
468 | 468 | if authorfile: |
|
469 | 469 | self.ui.status(_(b'writing author map file %s\n') % authorfile) |
|
470 | 470 | ofile = open(authorfile, b'wb+') |
|
471 | 471 | for author in self.authors: |
|
472 | 472 | ofile.write( |
|
473 | 473 | util.tonativeeol( |
|
474 | 474 | b"%s=%s\n" % (author, self.authors[author]) |
|
475 | 475 | ) |
|
476 | 476 | ) |
|
477 | 477 | ofile.close() |
|
478 | 478 | |
|
479 | 479 | def readauthormap(self, authorfile): |
|
480 | 480 | self.authors = readauthormap(self.ui, authorfile, self.authors) |
|
481 | 481 | |
|
482 | 482 | def cachecommit(self, rev): |
|
483 | 483 | commit = self.source.getcommit(rev) |
|
484 | 484 | commit.author = self.authors.get(commit.author, commit.author) |
|
485 | 485 | commit.branch = mapbranch(commit.branch, self.branchmap) |
|
486 | 486 | self.commitcache[rev] = commit |
|
487 | 487 | return commit |
|
488 | 488 | |
|
489 | 489 | def copy(self, rev): |
|
490 | 490 | commit = self.commitcache[rev] |
|
491 | 491 | full = self.opts.get(b'full') |
|
492 | 492 | changes = self.source.getchanges(rev, full) |
|
493 | 493 | if isinstance(changes, bytes): |
|
494 | 494 | if changes == SKIPREV: |
|
495 | 495 | dest = SKIPREV |
|
496 | 496 | else: |
|
497 | 497 | dest = self.map[changes] |
|
498 | 498 | self.map[rev] = dest |
|
499 | 499 | return |
|
500 | 500 | files, copies, cleanp2 = changes |
|
501 | 501 | pbranches = [] |
|
502 | 502 | if commit.parents: |
|
503 | 503 | for prev in commit.parents: |
|
504 | 504 | if prev not in self.commitcache: |
|
505 | 505 | self.cachecommit(prev) |
|
506 | 506 | pbranches.append( |
|
507 | 507 | (self.map[prev], self.commitcache[prev].branch) |
|
508 | 508 | ) |
|
509 | 509 | self.dest.setbranch(commit.branch, pbranches) |
|
510 | 510 | try: |
|
511 | 511 | parents = self.splicemap[rev] |
|
512 | 512 | self.ui.status( |
|
513 | 513 | _(b'spliced in %s as parents of %s\n') |
|
514 | 514 | % (_(b' and ').join(parents), rev) |
|
515 | 515 | ) |
|
516 | 516 | parents = [self.map.get(p, p) for p in parents] |
|
517 | 517 | except KeyError: |
|
518 | 518 | parents = [b[0] for b in pbranches] |
|
519 | 519 | parents.extend( |
|
520 | 520 | self.map[x] for x in commit.optparents if x in self.map |
|
521 | 521 | ) |
|
522 | 522 | if len(pbranches) != 2: |
|
523 | 523 | cleanp2 = set() |
|
524 | 524 | if len(parents) < 3: |
|
525 | 525 | source = progresssource(self.ui, self.source, len(files)) |
|
526 | 526 | else: |
|
527 | 527 | # For an octopus merge, we end up traversing the list of |
|
528 | 528 | # changed files N-1 times. This tweak to the number of |
|
529 | 529 | # files makes it so the progress bar doesn't overflow |
|
530 | 530 | # itself. |
|
531 | 531 | source = progresssource( |
|
532 | 532 | self.ui, self.source, len(files) * (len(parents) - 1) |
|
533 | 533 | ) |
|
534 | 534 | newnode = self.dest.putcommit( |
|
535 | 535 | files, copies, parents, commit, source, self.map, full, cleanp2 |
|
536 | 536 | ) |
|
537 | 537 | source.close() |
|
538 | 538 | self.source.converted(rev, newnode) |
|
539 | 539 | self.map[rev] = newnode |
|
540 | 540 | |
|
541 | 541 | def convert(self, sortmode): |
|
542 | 542 | try: |
|
543 | 543 | self.source.before() |
|
544 | 544 | self.dest.before() |
|
545 | 545 | self.source.setrevmap(self.map) |
|
546 | 546 | self.ui.status(_(b"scanning source...\n")) |
|
547 | 547 | heads = self.source.getheads() |
|
548 | 548 | parents = self.walktree(heads) |
|
549 | 549 | self.mergesplicemap(parents, self.splicemap) |
|
550 | 550 | self.ui.status(_(b"sorting...\n")) |
|
551 | 551 | t = self.toposort(parents, sortmode) |
|
552 | 552 | num = len(t) |
|
553 | 553 | c = None |
|
554 | 554 | |
|
555 | 555 | self.ui.status(_(b"converting...\n")) |
|
556 | 556 | progress = self.ui.makeprogress( |
|
557 | 557 | _(b'converting'), unit=_(b'revisions'), total=len(t) |
|
558 | 558 | ) |
|
559 | 559 | for i, c in enumerate(t): |
|
560 | 560 | num -= 1 |
|
561 | 561 | desc = self.commitcache[c].desc |
|
562 | 562 | if b"\n" in desc: |
|
563 | 563 | desc = desc.splitlines()[0] |
|
564 | 564 | # convert log message to local encoding without using |
|
565 | 565 | # tolocal() because the encoding.encoding convert() |
|
566 | 566 | # uses is 'utf-8' |
|
567 | 567 | self.ui.status(b"%d %s\n" % (num, recode(desc))) |
|
568 | 568 | self.ui.note(_(b"source: %s\n") % recode(c)) |
|
569 | 569 | progress.update(i) |
|
570 | 570 | self.copy(c) |
|
571 | 571 | progress.complete() |
|
572 | 572 | |
|
573 | 573 | if not self.ui.configbool(b'convert', b'skiptags'): |
|
574 | 574 | tags = self.source.gettags() |
|
575 | 575 | ctags = {} |
|
576 | 576 | for k in tags: |
|
577 | 577 | v = tags[k] |
|
578 | 578 | if self.map.get(v, SKIPREV) != SKIPREV: |
|
579 | 579 | ctags[k] = self.map[v] |
|
580 | 580 | |
|
581 | 581 | if c and ctags: |
|
582 | 582 | nrev, tagsparent = self.dest.puttags(ctags) |
|
583 | 583 | if nrev and tagsparent: |
|
584 | 584 | # write another hash correspondence to override the |
|
585 | 585 | # previous one so we don't end up with extra tag heads |
|
586 | 586 | tagsparents = [ |
|
587 | e | |
|
588 | for e in pycompat.iteritems(self.map) | |
|
589 | if e[1] == tagsparent | |
|
587 | e for e in self.map.items() if e[1] == tagsparent | |
|
590 | 588 | ] |
|
591 | 589 | if tagsparents: |
|
592 | 590 | self.map[tagsparents[0][0]] = nrev |
|
593 | 591 | |
|
594 | 592 | bookmarks = self.source.getbookmarks() |
|
595 | 593 | cbookmarks = {} |
|
596 | 594 | for k in bookmarks: |
|
597 | 595 | v = bookmarks[k] |
|
598 | 596 | if self.map.get(v, SKIPREV) != SKIPREV: |
|
599 | 597 | cbookmarks[k] = self.map[v] |
|
600 | 598 | |
|
601 | 599 | if c and cbookmarks: |
|
602 | 600 | self.dest.putbookmarks(cbookmarks) |
|
603 | 601 | |
|
604 | 602 | self.writeauthormap() |
|
605 | 603 | finally: |
|
606 | 604 | self.cleanup() |
|
607 | 605 | |
|
608 | 606 | def cleanup(self): |
|
609 | 607 | try: |
|
610 | 608 | self.dest.after() |
|
611 | 609 | finally: |
|
612 | 610 | self.source.after() |
|
613 | 611 | self.map.close() |
|
614 | 612 | |
|
615 | 613 | |
|
616 | 614 | def convert(ui, src, dest=None, revmapfile=None, **opts): |
|
617 | 615 | opts = pycompat.byteskwargs(opts) |
|
618 | 616 | global orig_encoding |
|
619 | 617 | orig_encoding = encoding.encoding |
|
620 | 618 | encoding.encoding = b'UTF-8' |
|
621 | 619 | |
|
622 | 620 | # support --authors as an alias for --authormap |
|
623 | 621 | if not opts.get(b'authormap'): |
|
624 | 622 | opts[b'authormap'] = opts.get(b'authors') |
|
625 | 623 | |
|
626 | 624 | if not dest: |
|
627 | 625 | dest = hg.defaultdest(src) + b"-hg" |
|
628 | 626 | ui.status(_(b"assuming destination %s\n") % dest) |
|
629 | 627 | |
|
630 | 628 | destc = convertsink(ui, dest, opts.get(b'dest_type')) |
|
631 | 629 | destc = scmutil.wrapconvertsink(destc) |
|
632 | 630 | |
|
633 | 631 | try: |
|
634 | 632 | srcc, defaultsort = convertsource( |
|
635 | 633 | ui, src, opts.get(b'source_type'), opts.get(b'rev') |
|
636 | 634 | ) |
|
637 | 635 | except Exception: |
|
638 | 636 | for path in destc.created: |
|
639 | 637 | shutil.rmtree(path, True) |
|
640 | 638 | raise |
|
641 | 639 | |
|
642 | 640 | sortmodes = (b'branchsort', b'datesort', b'sourcesort', b'closesort') |
|
643 | 641 | sortmode = [m for m in sortmodes if opts.get(m)] |
|
644 | 642 | if len(sortmode) > 1: |
|
645 | 643 | raise error.Abort(_(b'more than one sort mode specified')) |
|
646 | 644 | if sortmode: |
|
647 | 645 | sortmode = sortmode[0] |
|
648 | 646 | else: |
|
649 | 647 | sortmode = defaultsort |
|
650 | 648 | |
|
651 | 649 | if sortmode == b'sourcesort' and not srcc.hasnativeorder(): |
|
652 | 650 | raise error.Abort( |
|
653 | 651 | _(b'--sourcesort is not supported by this data source') |
|
654 | 652 | ) |
|
655 | 653 | if sortmode == b'closesort' and not srcc.hasnativeclose(): |
|
656 | 654 | raise error.Abort( |
|
657 | 655 | _(b'--closesort is not supported by this data source') |
|
658 | 656 | ) |
|
659 | 657 | |
|
660 | 658 | fmap = opts.get(b'filemap') |
|
661 | 659 | if fmap: |
|
662 | 660 | srcc = filemap.filemap_source(ui, srcc, fmap) |
|
663 | 661 | destc.setfilemapmode(True) |
|
664 | 662 | |
|
665 | 663 | if not revmapfile: |
|
666 | 664 | revmapfile = destc.revmapfile() |
|
667 | 665 | |
|
668 | 666 | c = converter(ui, srcc, destc, revmapfile, opts) |
|
669 | 667 | c.convert(sortmode) |
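
The only substantive change in this hunk is replacing pycompat.iteritems() with the plain dict .items() method, which is equivalent on Python 3 since items() already returns an iterable view of (key, value) pairs. A minimal standalone sketch (plain Python, not Mercurial code) of the pattern the tagsparents lookup above uses, scanning a mapping for entries with a given value:

    # toy revision map: source hash -> converted hash (made-up values)
    revmap = {b'src1': b'dst1', b'src2': b'dst2'}
    tagsparent = b'dst2'

    # .items() yields (key, value) pairs, exactly what pycompat.iteritems()
    # used to provide across Python 2 and 3
    tagsparents = [e for e in revmap.items() if e[1] == tagsparent]
    if tagsparents:
        print(tagsparents[0][0])  # -> b'src2'
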
@@ -1,1068 +1,1068 b'' | |||
|
1 | 1 | # Mercurial built-in replacement for cvsps. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import functools |
|
9 | 9 | import os |
|
10 | 10 | import pickle |
|
11 | 11 | import re |
|
12 | 12 | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | 14 | from mercurial.pycompat import open |
|
15 | 15 | from mercurial import ( |
|
16 | 16 | encoding, |
|
17 | 17 | error, |
|
18 | 18 | hook, |
|
19 | 19 | pycompat, |
|
20 | 20 | util, |
|
21 | 21 | ) |
|
22 | 22 | from mercurial.utils import ( |
|
23 | 23 | dateutil, |
|
24 | 24 | procutil, |
|
25 | 25 | stringutil, |
|
26 | 26 | ) |
|
27 | 27 | |
|
28 | 28 | |
|
29 | 29 | class logentry(object): |
|
30 | 30 | """Class logentry has the following attributes: |
|
31 | 31 | .author - author name as CVS knows it |
|
32 | 32 | .branch - name of branch this revision is on |
|
33 | 33 | .branches - revision tuple of branches starting at this revision |
|
34 | 34 | .comment - commit message |
|
35 | 35 | .commitid - CVS commitid or None |
|
36 | 36 | .date - the commit date as a (time, tz) tuple |
|
37 | 37 | .dead - true if file revision is dead |
|
38 | 38 | .file - Name of file |
|
39 | 39 | .lines - a tuple (+lines, -lines) or None |
|
40 | 40 | .parent - Previous revision of this entry |
|
41 | 41 | .rcs - name of file as returned from CVS |
|
42 | 42 | .revision - revision number as tuple |
|
43 | 43 | .tags - list of tags on the file |
|
44 | 44 | .synthetic - is this a synthetic "file ... added on ..." revision? |
|
45 | 45 | .mergepoint - the branch that has been merged from (if present in |
|
46 | 46 | rlog output) or None |
|
47 | 47 | .branchpoints - the branches that start at the current entry or empty |
|
48 | 48 | """ |
|
49 | 49 | |
|
50 | 50 | def __init__(self, **entries): |
|
51 | 51 | self.synthetic = False |
|
52 | 52 | self.__dict__.update(entries) |
|
53 | 53 | |
|
54 | 54 | def __repr__(self): |
|
55 | 55 | items = ("%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)) |
|
56 | 56 | return "%s(%s)" % (type(self).__name__, ", ".join(items)) |
|
57 | 57 | |
|
58 | 58 | |
|
59 | 59 | class logerror(Exception): |
|
60 | 60 | pass |
|
61 | 61 | |
|
62 | 62 | |
|
63 | 63 | def getrepopath(cvspath): |
|
64 | 64 | """Return the repository path from a CVS path. |
|
65 | 65 | |
|
66 | 66 | >>> getrepopath(b'/foo/bar') |
|
67 | 67 | '/foo/bar' |
|
68 | 68 | >>> getrepopath(b'c:/foo/bar') |
|
69 | 69 | '/foo/bar' |
|
70 | 70 | >>> getrepopath(b':pserver:10/foo/bar') |
|
71 | 71 | '/foo/bar' |
|
72 | 72 | >>> getrepopath(b':pserver:10c:/foo/bar') |
|
73 | 73 | '/foo/bar' |
|
74 | 74 | >>> getrepopath(b':pserver:/foo/bar') |
|
75 | 75 | '/foo/bar' |
|
76 | 76 | >>> getrepopath(b':pserver:c:/foo/bar') |
|
77 | 77 | '/foo/bar' |
|
78 | 78 | >>> getrepopath(b':pserver:truc@foo.bar:/foo/bar') |
|
79 | 79 | '/foo/bar' |
|
80 | 80 | >>> getrepopath(b':pserver:truc@foo.bar:c:/foo/bar') |
|
81 | 81 | '/foo/bar' |
|
82 | 82 | >>> getrepopath(b'user@server/path/to/repository') |
|
83 | 83 | '/path/to/repository' |
|
84 | 84 | """ |
|
85 | 85 | # According to CVS manual, CVS paths are expressed like: |
|
86 | 86 | # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository |
|
87 | 87 | # |
|
88 | 88 | # The CVS path is split into parts, and the position of the first '/'

89 | 89 | # after the '@' is located. The repository path is the rest of the

90 | 90 | # string from that '/' on, including it.
|
91 | 91 | |
|
92 | 92 | parts = cvspath.split(b':') |
|
93 | 93 | atposition = parts[-1].find(b'@') |
|
94 | 94 | start = 0 |
|
95 | 95 | |
|
96 | 96 | if atposition != -1: |
|
97 | 97 | start = atposition |
|
98 | 98 | |
|
99 | 99 | repopath = parts[-1][parts[-1].find(b'/', start) :] |
|
100 | 100 | return repopath |
|
101 | 101 | |
|
102 | 102 | |
|
103 | 103 | def createlog(ui, directory=None, root=b"", rlog=True, cache=None): |
|
104 | 104 | '''Collect the CVS rlog''' |
|
105 | 105 | |
|
106 | 106 | # Because we store many duplicate commit log messages, reusing strings |
|
107 | 107 | # saves a lot of memory and pickle storage space. |
|
108 | 108 | _scache = {} |
|
109 | 109 | |
|
110 | 110 | def scache(s): |
|
111 | 111 | """return a shared version of a string""" |
|
112 | 112 | return _scache.setdefault(s, s) |
|
113 | 113 | |
|
114 | 114 | ui.status(_(b'collecting CVS rlog\n')) |
|
115 | 115 | |
|
116 | 116 | log = [] # list of logentry objects containing the CVS state |
|
117 | 117 | |
|
118 | 118 | # patterns to match in CVS (r)log output, by state of use |
|
119 | 119 | re_00 = re.compile(b'RCS file: (.+)$') |
|
120 | 120 | re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$') |
|
121 | 121 | re_02 = re.compile(b'cvs (r?log|server): (.+)\n$') |
|
122 | 122 | re_03 = re.compile( |
|
123 | 123 | b"(Cannot access.+CVSROOT)|(can't create temporary directory.+)$" |
|
124 | 124 | ) |
|
125 | 125 | re_10 = re.compile(b'Working file: (.+)$') |
|
126 | 126 | re_20 = re.compile(b'symbolic names:') |
|
127 | 127 | re_30 = re.compile(b'\t(.+): ([\\d.]+)$') |
|
128 | 128 | re_31 = re.compile(b'----------------------------$') |
|
129 | 129 | re_32 = re.compile( |
|
130 | 130 | b'=======================================' |
|
131 | 131 | b'======================================$' |
|
132 | 132 | ) |
|
133 | 133 | re_50 = re.compile(br'revision ([\d.]+)(\s+locked by:\s+.+;)?$') |
|
134 | 134 | re_60 = re.compile( |
|
135 | 135 | br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' |
|
136 | 136 | br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' |
|
137 | 137 | br'(\s+commitid:\s+([^;]+);)?' |
|
138 | 138 | br'(.*mergepoint:\s+([^;]+);)?' |
|
139 | 139 | ) |
|
140 | 140 | re_70 = re.compile(b'branches: (.+);$') |
|
141 | 141 | |
|
142 | 142 | file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch') |
|
143 | 143 | |
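
The regular expressions above drive the rlog state machine; re_60 in particular pulls the date, author, state, changed-line counts, commitid and mergepoint out of a single log line. A small standalone sketch with an assumed sample line (made up for illustration, not real rlog output):

    import re

    re_60 = re.compile(
        br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
        br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
        br'(\s+commitid:\s+([^;]+);)?'
        br'(.*mergepoint:\s+([^;]+);)?'
    )
    line = b'date: 2008/03/09 12:00:00;  author: frank;  state: Exp;  lines: +2 -1;'
    m = re_60.match(line)
    print(m.group(1), m.group(2), m.group(3))  # date, author, state
    print(m.group(5), m.group(6))              # b'+2' b'-1'
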
|
144 | 144 | prefix = b'' # leading path to strip off what we get from CVS
|
145 | 145 | |
|
146 | 146 | if directory is None: |
|
147 | 147 | # Current working directory |
|
148 | 148 | |
|
149 | 149 | # Get the real directory in the repository |
|
150 | 150 | try: |
|
151 | 151 | with open(os.path.join(b'CVS', b'Repository'), b'rb') as f: |
|
152 | 152 | prefix = f.read().strip() |
|
153 | 153 | directory = prefix |
|
154 | 154 | if prefix == b".": |
|
155 | 155 | prefix = b"" |
|
156 | 156 | except IOError: |
|
157 | 157 | raise logerror(_(b'not a CVS sandbox')) |
|
158 | 158 | |
|
159 | 159 | if prefix and not prefix.endswith(pycompat.ossep): |
|
160 | 160 | prefix += pycompat.ossep |
|
161 | 161 | |
|
162 | 162 | # Use the Root file in the sandbox, if it exists |
|
163 | 163 | try: |
|
164 | 164 | root = open(os.path.join(b'CVS', b'Root'), b'rb').read().strip() |
|
165 | 165 | except IOError: |
|
166 | 166 | pass |
|
167 | 167 | |
|
168 | 168 | if not root: |
|
169 | 169 | root = encoding.environ.get(b'CVSROOT', b'') |
|
170 | 170 | |
|
171 | 171 | # read log cache if one exists |
|
172 | 172 | oldlog = [] |
|
173 | 173 | date = None |
|
174 | 174 | |
|
175 | 175 | if cache: |
|
176 | 176 | cachedir = os.path.expanduser(b'~/.hg.cvsps') |
|
177 | 177 | if not os.path.exists(cachedir): |
|
178 | 178 | os.mkdir(cachedir) |
|
179 | 179 | |
|
180 | 180 | # The cvsps cache pickle needs a uniquified name, based on the |
|
181 | 181 | # repository location. The address may have all sorts of nasties

182 | 182 | # in it: slashes, colons and such. So here we take just the
|
183 | 183 | # alphanumeric characters, concatenated in a way that does not |
|
184 | 184 | # mix up the various components, so that |
|
185 | 185 | # :pserver:user@server:/path |
|
186 | 186 | # and |
|
187 | 187 | # /pserver/user/server/path |
|
188 | 188 | # are mapped to different cache file names. |
|
189 | 189 | cachefile = root.split(b":") + [directory, b"cache"] |
|
190 | 190 | cachefile = [b'-'.join(re.findall(br'\w+', s)) for s in cachefile if s] |
|
191 | 191 | cachefile = os.path.join( |
|
192 | 192 | cachedir, b'.'.join([s for s in cachefile if s]) |
|
193 | 193 | ) |
|
194 | 194 | |
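
The naming scheme described in the comment above can be reproduced in isolation; this sketch (assumed example values, not part of the converter) shows how the two addresses from the comment end up with distinct cache file names:

    import re

    def cachename(root, directory):
        # split on ':' first so the components stay distinct, then keep
        # only word characters within each component
        parts = root.split(b":") + [directory, b"cache"]
        parts = [b'-'.join(re.findall(br'\w+', s)) for s in parts if s]
        return b'.'.join([s for s in parts if s])

    print(cachename(b':pserver:user@server:/path', b'mod'))
    # -> b'pserver.user-server.path.mod.cache'
    print(cachename(b'/pserver/user/server/path', b'mod'))
    # -> b'pserver-user-server-path.mod.cache'
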
|
195 | 195 | if cache == b'update': |
|
196 | 196 | try: |
|
197 | 197 | ui.note(_(b'reading cvs log cache %s\n') % cachefile) |
|
198 | 198 | oldlog = pickle.load(open(cachefile, b'rb')) |
|
199 | 199 | for e in oldlog: |
|
200 | 200 | if not ( |
|
201 | 201 | util.safehasattr(e, b'branchpoints') |
|
202 | 202 | and util.safehasattr(e, b'commitid') |
|
203 | 203 | and util.safehasattr(e, b'mergepoint') |
|
204 | 204 | ): |
|
205 | 205 | ui.status(_(b'ignoring old cache\n')) |
|
206 | 206 | oldlog = [] |
|
207 | 207 | break |
|
208 | 208 | |
|
209 | 209 | ui.note(_(b'cache has %d log entries\n') % len(oldlog)) |
|
210 | 210 | except Exception as e: |
|
211 | 211 | ui.note(_(b'error reading cache: %r\n') % e) |
|
212 | 212 | |
|
213 | 213 | if oldlog: |
|
214 | 214 | date = oldlog[-1].date # last commit date as a (time,tz) tuple |
|
215 | 215 | date = dateutil.datestr(date, b'%Y/%m/%d %H:%M:%S %1%2') |
|
216 | 216 | |
|
217 | 217 | # build the CVS commandline |
|
218 | 218 | cmd = [b'cvs', b'-q'] |
|
219 | 219 | if root: |
|
220 | 220 | cmd.append(b'-d%s' % root) |
|
221 | 221 | p = util.normpath(getrepopath(root)) |
|
222 | 222 | if not p.endswith(b'/'): |
|
223 | 223 | p += b'/' |
|
224 | 224 | if prefix: |
|
225 | 225 | # looks like normpath replaces "" by "." |
|
226 | 226 | prefix = p + util.normpath(prefix) |
|
227 | 227 | else: |
|
228 | 228 | prefix = p |
|
229 | 229 | cmd.append([b'log', b'rlog'][rlog]) |
|
230 | 230 | if date: |
|
231 | 231 | # no space between option and date string |
|
232 | 232 | cmd.append(b'-d>%s' % date) |
|
233 | 233 | cmd.append(directory) |
|
234 | 234 | |
|
235 | 235 | # state machine begins here |
|
236 | 236 | tags = {} # dictionary of revisions on current file with their tags |
|
237 | 237 | branchmap = {} # mapping between branch names and revision numbers |
|
238 | 238 | rcsmap = {} |
|
239 | 239 | state = 0 |
|
240 | 240 | store = False # set when a new record can be appended |
|
241 | 241 | |
|
242 | 242 | cmd = [procutil.shellquote(arg) for arg in cmd] |
|
243 | 243 | ui.note(_(b"running %s\n") % (b' '.join(cmd))) |
|
244 | 244 | ui.debug(b"prefix=%r directory=%r root=%r\n" % (prefix, directory, root)) |
|
245 | 245 | |
|
246 | 246 | pfp = procutil.popen(b' '.join(cmd), b'rb') |
|
247 | 247 | peek = util.fromnativeeol(pfp.readline()) |
|
248 | 248 | while True: |
|
249 | 249 | line = peek |
|
250 | 250 | if line == b'': |
|
251 | 251 | break |
|
252 | 252 | peek = util.fromnativeeol(pfp.readline()) |
|
253 | 253 | if line.endswith(b'\n'): |
|
254 | 254 | line = line[:-1] |
|
255 | 255 | # ui.debug('state=%d line=%r\n' % (state, line)) |
|
256 | 256 | |
|
257 | 257 | if state == 0: |
|
258 | 258 | # initial state, consume input until we see 'RCS file' |
|
259 | 259 | match = re_00.match(line) |
|
260 | 260 | if match: |
|
261 | 261 | rcs = match.group(1) |
|
262 | 262 | tags = {} |
|
263 | 263 | if rlog: |
|
264 | 264 | filename = util.normpath(rcs[:-2]) |
|
265 | 265 | if filename.startswith(prefix): |
|
266 | 266 | filename = filename[len(prefix) :] |
|
267 | 267 | if filename.startswith(b'/'): |
|
268 | 268 | filename = filename[1:] |
|
269 | 269 | if filename.startswith(b'Attic/'): |
|
270 | 270 | filename = filename[6:] |
|
271 | 271 | else: |
|
272 | 272 | filename = filename.replace(b'/Attic/', b'/') |
|
273 | 273 | state = 2 |
|
274 | 274 | continue |
|
275 | 275 | state = 1 |
|
276 | 276 | continue |
|
277 | 277 | match = re_01.match(line) |
|
278 | 278 | if match: |
|
279 | 279 | raise logerror(match.group(1)) |
|
280 | 280 | match = re_02.match(line) |
|
281 | 281 | if match: |
|
282 | 282 | raise logerror(match.group(2)) |
|
283 | 283 | if re_03.match(line): |
|
284 | 284 | raise logerror(line) |
|
285 | 285 | |
|
286 | 286 | elif state == 1: |
|
287 | 287 | # expect 'Working file' (only when using log instead of rlog) |
|
288 | 288 | match = re_10.match(line) |
|
289 | 289 | assert match, _(b'RCS file must be followed by working file') |
|
290 | 290 | filename = util.normpath(match.group(1)) |
|
291 | 291 | state = 2 |
|
292 | 292 | |
|
293 | 293 | elif state == 2: |
|
294 | 294 | # expect 'symbolic names' |
|
295 | 295 | if re_20.match(line): |
|
296 | 296 | branchmap = {} |
|
297 | 297 | state = 3 |
|
298 | 298 | |
|
299 | 299 | elif state == 3: |
|
300 | 300 | # read the symbolic names and store as tags |
|
301 | 301 | match = re_30.match(line) |
|
302 | 302 | if match: |
|
303 | 303 | rev = [int(x) for x in match.group(2).split(b'.')] |
|
304 | 304 | |
|
305 | 305 | # Convert magic branch number to an odd-numbered one (e.g. 1.2.0.4 -> 1.2.4)
|
306 | 306 | revn = len(rev) |
|
307 | 307 | if revn > 3 and (revn % 2) == 0 and rev[-2] == 0: |
|
308 | 308 | rev = rev[:-2] + rev[-1:] |
|
309 | 309 | rev = tuple(rev) |
|
310 | 310 | |
|
311 | 311 | if rev not in tags: |
|
312 | 312 | tags[rev] = [] |
|
313 | 313 | tags[rev].append(match.group(1)) |
|
314 | 314 | branchmap[match.group(1)] = match.group(2) |
|
315 | 315 | |
|
316 | 316 | elif re_31.match(line): |
|
317 | 317 | state = 5 |
|
318 | 318 | elif re_32.match(line): |
|
319 | 319 | state = 0 |
|
320 | 320 | |
|
321 | 321 | elif state == 4: |
|
322 | 322 | # expecting '------' separator before first revision |
|
323 | 323 | if re_31.match(line): |
|
324 | 324 | state = 5 |
|
325 | 325 | else: |
|
326 | 326 | assert not re_32.match(line), _( |
|
327 | 327 | b'must have at least some revisions' |
|
328 | 328 | ) |
|
329 | 329 | |
|
330 | 330 | elif state == 5: |
|
331 | 331 | # expecting revision number and possibly (ignored) lock indication |
|
332 | 332 | # we create the logentry here from values stored in states 0 to 4, |
|
333 | 333 | # as this state is re-entered for subsequent revisions of a file. |
|
334 | 334 | match = re_50.match(line) |
|
335 | 335 | assert match, _(b'expected revision number') |
|
336 | 336 | e = logentry( |
|
337 | 337 | rcs=scache(rcs), |
|
338 | 338 | file=scache(filename), |
|
339 | 339 | revision=tuple([int(x) for x in match.group(1).split(b'.')]), |
|
340 | 340 | branches=[], |
|
341 | 341 | parent=None, |
|
342 | 342 | commitid=None, |
|
343 | 343 | mergepoint=None, |
|
344 | 344 | branchpoints=set(), |
|
345 | 345 | ) |
|
346 | 346 | |
|
347 | 347 | state = 6 |
|
348 | 348 | |
|
349 | 349 | elif state == 6: |
|
350 | 350 | # expecting date, author, state, lines changed |
|
351 | 351 | match = re_60.match(line) |
|
352 | 352 | assert match, _(b'revision must be followed by date line') |
|
353 | 353 | d = match.group(1) |
|
354 | 354 | if d[2] == b'/': |
|
355 | 355 | # Y2K |
|
356 | 356 | d = b'19' + d |
|
357 | 357 | |
|
358 | 358 | if len(d.split()) != 3: |
|
359 | 359 | # cvs log dates always in GMT |
|
360 | 360 | d = d + b' UTC' |
|
361 | 361 | e.date = dateutil.parsedate( |
|
362 | 362 | d, |
|
363 | 363 | [ |
|
364 | 364 | b'%y/%m/%d %H:%M:%S', |
|
365 | 365 | b'%Y/%m/%d %H:%M:%S', |
|
366 | 366 | b'%Y-%m-%d %H:%M:%S', |
|
367 | 367 | ], |
|
368 | 368 | ) |
|
369 | 369 | e.author = scache(match.group(2)) |
|
370 | 370 | e.dead = match.group(3).lower() == b'dead' |
|
371 | 371 | |
|
372 | 372 | if match.group(5): |
|
373 | 373 | if match.group(6): |
|
374 | 374 | e.lines = (int(match.group(5)), int(match.group(6))) |
|
375 | 375 | else: |
|
376 | 376 | e.lines = (int(match.group(5)), 0) |
|
377 | 377 | elif match.group(6): |
|
378 | 378 | e.lines = (0, int(match.group(6))) |
|
379 | 379 | else: |
|
380 | 380 | e.lines = None |
|
381 | 381 | |
|
382 | 382 | if match.group(7): # cvs 1.12 commitid |
|
383 | 383 | e.commitid = match.group(8) |
|
384 | 384 | |
|
385 | 385 | if match.group(9): # cvsnt mergepoint |
|
386 | 386 | myrev = match.group(10).split(b'.') |
|
387 | 387 | if len(myrev) == 2: # head |
|
388 | 388 | e.mergepoint = b'HEAD' |
|
389 | 389 | else: |
|
390 | 390 | myrev = b'.'.join(myrev[:-2] + [b'0', myrev[-2]]) |
|
391 | 391 | branches = [b for b in branchmap if branchmap[b] == myrev] |
|
392 | 392 | assert len(branches) == 1, ( |
|
393 | 393 | b'unknown branch: %s' % e.mergepoint |
|
394 | 394 | ) |
|
395 | 395 | e.mergepoint = branches[0] |
|
396 | 396 | |
|
397 | 397 | e.comment = [] |
|
398 | 398 | state = 7 |
|
399 | 399 | |
|
400 | 400 | elif state == 7: |
|
401 | 401 | # read the revision numbers of branches that start at this revision |
|
402 | 402 | # or store the commit log message otherwise |
|
403 | 403 | m = re_70.match(line) |
|
404 | 404 | if m: |
|
405 | 405 | e.branches = [ |
|
406 | 406 | tuple([int(y) for y in x.strip().split(b'.')]) |
|
407 | 407 | for x in m.group(1).split(b';') |
|
408 | 408 | ] |
|
409 | 409 | state = 8 |
|
410 | 410 | elif re_31.match(line) and re_50.match(peek): |
|
411 | 411 | state = 5 |
|
412 | 412 | store = True |
|
413 | 413 | elif re_32.match(line): |
|
414 | 414 | state = 0 |
|
415 | 415 | store = True |
|
416 | 416 | else: |
|
417 | 417 | e.comment.append(line) |
|
418 | 418 | |
|
419 | 419 | elif state == 8: |
|
420 | 420 | # store commit log message |
|
421 | 421 | if re_31.match(line): |
|
422 | 422 | cpeek = peek |
|
423 | 423 | if cpeek.endswith(b'\n'): |
|
424 | 424 | cpeek = cpeek[:-1] |
|
425 | 425 | if re_50.match(cpeek): |
|
426 | 426 | state = 5 |
|
427 | 427 | store = True |
|
428 | 428 | else: |
|
429 | 429 | e.comment.append(line) |
|
430 | 430 | elif re_32.match(line): |
|
431 | 431 | state = 0 |
|
432 | 432 | store = True |
|
433 | 433 | else: |
|
434 | 434 | e.comment.append(line) |
|
435 | 435 | |
|
436 | 436 | # When a file is added on a branch B1, CVS creates a synthetic |
|
437 | 437 | # dead trunk revision 1.1 so that the branch has a root. |
|
438 | 438 | # Likewise, if you merge such a file to a later branch B2 (one |
|
439 | 439 | # that already existed when the file was added on B1), CVS |
|
440 | 440 | # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop |
|
441 | 441 | # these revisions now, but mark them synthetic so |
|
442 | 442 | # createchangeset() can take care of them. |
|
443 | 443 | if ( |
|
444 | 444 | store |
|
445 | 445 | and e.dead |
|
446 | 446 | and e.revision[-1] == 1 |
|
447 | 447 | and len(e.comment) == 1 # 1.1 or 1.1.x.1 |
|
448 | 448 | and file_added_re.match(e.comment[0]) |
|
449 | 449 | ): |
|
450 | 450 | ui.debug( |
|
451 | 451 | b'found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0]) |
|
452 | 452 | ) |
|
453 | 453 | e.synthetic = True |
|
454 | 454 | |
|
455 | 455 | if store: |
|
456 | 456 | # clean up the results and save in the log. |
|
457 | 457 | store = False |
|
458 | 458 | e.tags = sorted([scache(x) for x in tags.get(e.revision, [])]) |
|
459 | 459 | e.comment = scache(b'\n'.join(e.comment)) |
|
460 | 460 | |
|
461 | 461 | revn = len(e.revision) |
|
462 | 462 | if revn > 3 and (revn % 2) == 0: |
|
463 | 463 | e.branch = tags.get(e.revision[:-1], [None])[0] |
|
464 | 464 | else: |
|
465 | 465 | e.branch = None |
|
466 | 466 | |
|
467 | 467 | # find the branches starting from this revision |
|
468 | 468 | branchpoints = set() |
|
469 |     | for branch, revision in pycompat.iteritems(branchmap):

    | 469 | for branch, revision in branchmap.items():
|
470 | 470 | revparts = tuple([int(i) for i in revision.split(b'.')]) |
|
471 | 471 | if len(revparts) < 2: # bad tags |
|
472 | 472 | continue |
|
473 | 473 | if revparts[-2] == 0 and revparts[-1] % 2 == 0: |
|
474 | 474 | # normal branch |
|
475 | 475 | if revparts[:-2] == e.revision: |
|
476 | 476 | branchpoints.add(branch) |
|
477 | 477 | elif revparts == (1, 1, 1): # vendor branch |
|
478 | 478 | if revparts in e.branches: |
|
479 | 479 | branchpoints.add(branch) |
|
480 | 480 | e.branchpoints = branchpoints |
|
481 | 481 | |
|
482 | 482 | log.append(e) |
|
483 | 483 | |
|
484 | 484 | rcsmap[e.rcs.replace(b'/Attic/', b'/')] = e.rcs |
|
485 | 485 | |
|
486 | 486 | if len(log) % 100 == 0: |
|
487 | 487 | ui.status( |
|
488 | 488 | stringutil.ellipsis(b'%d %s' % (len(log), e.file), 80) |
|
489 | 489 | + b'\n' |
|
490 | 490 | ) |
|
491 | 491 | |
|
492 | 492 | log.sort(key=lambda x: (x.rcs, x.revision)) |
|
493 | 493 | |
|
494 | 494 | # find parent revisions of individual files |
|
495 | 495 | versions = {} |
|
496 | 496 | for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)): |
|
497 | 497 | rcs = e.rcs.replace(b'/Attic/', b'/') |
|
498 | 498 | if rcs in rcsmap: |
|
499 | 499 | e.rcs = rcsmap[rcs] |
|
500 | 500 | branch = e.revision[:-1] |
|
501 | 501 | versions[(e.rcs, branch)] = e.revision |
|
502 | 502 | |
|
503 | 503 | for e in log: |
|
504 | 504 | branch = e.revision[:-1] |
|
505 | 505 | p = versions.get((e.rcs, branch), None) |
|
506 | 506 | if p is None: |
|
507 | 507 | p = e.revision[:-2] |
|
508 | 508 | e.parent = p |
|
509 | 509 | versions[(e.rcs, branch)] = e.revision |
|
510 | 510 | |
|
511 | 511 | # update the log cache |
|
512 | 512 | if cache: |
|
513 | 513 | if log: |
|
514 | 514 | # join up the old and new logs |
|
515 | 515 | log.sort(key=lambda x: x.date) |
|
516 | 516 | |
|
517 | 517 | if oldlog and oldlog[-1].date >= log[0].date: |
|
518 | 518 | raise logerror( |
|
519 | 519 | _( |
|
520 | 520 | b'log cache overlaps with new log entries,' |
|
521 | 521 | b' re-run without cache.' |
|
522 | 522 | ) |
|
523 | 523 | ) |
|
524 | 524 | |
|
525 | 525 | log = oldlog + log |
|
526 | 526 | |
|
527 | 527 | # write the new cachefile |
|
528 | 528 | ui.note(_(b'writing cvs log cache %s\n') % cachefile) |
|
529 | 529 | pickle.dump(log, open(cachefile, b'wb')) |
|
530 | 530 | else: |
|
531 | 531 | log = oldlog |
|
532 | 532 | |
|
533 | 533 | ui.status(_(b'%d log entries\n') % len(log)) |
|
534 | 534 | |
|
535 | 535 | encodings = ui.configlist(b'convert', b'cvsps.logencoding') |
|
536 | 536 | if encodings: |
|
537 | 537 | |
|
538 | 538 | def revstr(r): |
|
539 | 539 | # this is needed, because logentry.revision is a tuple of "int" |
|
540 | 540 | # (e.g. (1, 2) for "1.2") |
|
541 | 541 | return b'.'.join(pycompat.maplist(pycompat.bytestr, r)) |
|
542 | 542 | |
|
543 | 543 | for entry in log: |
|
544 | 544 | comment = entry.comment |
|
545 | 545 | for e in encodings: |
|
546 | 546 | try: |
|
547 | 547 | entry.comment = comment.decode(pycompat.sysstr(e)).encode( |
|
548 | 548 | 'utf-8' |
|
549 | 549 | ) |
|
550 | 550 | if ui.debugflag: |
|
551 | 551 | ui.debug( |
|
552 | 552 | b"transcoding by %s: %s of %s\n" |
|
553 | 553 | % (e, revstr(entry.revision), entry.file) |
|
554 | 554 | ) |
|
555 | 555 | break |
|
556 | 556 | except UnicodeDecodeError: |
|
557 | 557 | pass # try next encoding |
|
558 | 558 | except LookupError as inst: # unknown encoding, maybe |
|
559 | 559 | raise error.Abort( |
|
560 | 560 | pycompat.bytestr(inst), |
|
561 | 561 | hint=_( |
|
562 | 562 | b'check convert.cvsps.logencoding configuration' |
|
563 | 563 | ), |
|
564 | 564 | ) |
|
565 | 565 | else: |
|
566 | 566 | raise error.Abort( |
|
567 | 567 | _( |
|
568 | 568 | b"no encoding can transcode" |
|
569 | 569 | b" CVS log message for %s of %s" |
|
570 | 570 | ) |
|
571 | 571 | % (revstr(entry.revision), entry.file), |
|
572 | 572 | hint=_(b'check convert.cvsps.logencoding configuration'), |
|
573 | 573 | ) |
|
574 | 574 | |
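
The transcoding loop above leans on Python's for/else: the else branch runs only when the loop finished without a break, i.e. when every configured encoding failed to decode the message. A self-contained sketch of the same fallback-decode idiom, with assumed inputs:

    encodings = ['shift_jis', 'latin-1']   # assumed configuration values
    raw = b'caf\xe9'                       # a non-UTF-8 log message

    for enc in encodings:
        try:
            text = raw.decode(enc).encode('utf-8')
            break                          # first encoding that works wins
        except UnicodeDecodeError:
            continue                       # try the next one
    else:
        raise ValueError('no encoding can transcode %r' % raw)

    print(text)  # -> b'caf\xc3\xa9'
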
|
575 | 575 | hook.hook(ui, None, b"cvslog", True, log=log) |
|
576 | 576 | |
|
577 | 577 | return log |
|
578 | 578 | |
|
579 | 579 | |
|
580 | 580 | class changeset(object): |
|
581 | 581 | """Class changeset has the following attributes: |
|
582 | 582 | .id - integer identifying this changeset (list index) |
|
583 | 583 | .author - author name as CVS knows it |
|
584 | 584 | .branch - name of branch this changeset is on, or None |
|
585 | 585 | .comment - commit message |
|
586 | 586 | .commitid - CVS commitid or None |
|
587 | 587 | .date - the commit date as a (time,tz) tuple |
|
588 | 588 | .entries - list of logentry objects in this changeset |
|
589 | 589 | .parents - list of one or two parent changesets |
|
590 | 590 | .tags - list of tags on this changeset |
|
591 | 591 | .synthetic - from synthetic revision "file ... added on branch ..." |
|
592 | 592 | .mergepoint - the branch that has been merged from or None

593 | 593 | .branchpoints - the branches that start at the current entry or empty
|
594 | 594 | """ |
|
595 | 595 | |
|
596 | 596 | def __init__(self, **entries): |
|
597 | 597 | self.id = None |
|
598 | 598 | self.synthetic = False |
|
599 | 599 | self.__dict__.update(entries) |
|
600 | 600 | |
|
601 | 601 | def __repr__(self): |
|
602 | 602 | items = ( |
|
603 | 603 | b"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__) |
|
604 | 604 | ) |
|
605 | 605 | return b"%s(%s)" % (type(self).__name__, b", ".join(items)) |
|
606 | 606 | |
|
607 | 607 | |
|
608 | 608 | def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None): |
|
609 | 609 | '''Convert log into changesets.''' |
|
610 | 610 | |
|
611 | 611 | ui.status(_(b'creating changesets\n')) |
|
612 | 612 | |
|
613 | 613 | # try to order commitids by date |
|
614 | 614 | mindate = {} |
|
615 | 615 | for e in log: |
|
616 | 616 | if e.commitid: |
|
617 | 617 | if e.commitid not in mindate: |
|
618 | 618 | mindate[e.commitid] = e.date |
|
619 | 619 | else: |
|
620 | 620 | mindate[e.commitid] = min(e.date, mindate[e.commitid]) |
|
621 | 621 | |
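
The loop above records, for each commitid, the earliest date at which it was seen, so that the subsequent sort can order commitids chronologically. The same accumulation in miniature (toy data, not real log entries):

    # (commitid, date) pairs standing in for log entries
    entries = [(b'c1', 300), (b'c2', 100), (b'c1', 200)]
    mindate = {}
    for commitid, date in entries:
        # keep the smallest date seen for each commitid
        mindate[commitid] = min(date, mindate.get(commitid, date))
    print(mindate)  # -> {b'c1': 200, b'c2': 100}
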
|
622 | 622 | # Merge changesets |
|
623 | 623 | log.sort( |
|
624 | 624 | key=lambda x: ( |
|
625 | 625 | mindate.get(x.commitid, (-1, 0)), |
|
626 | 626 | x.commitid or b'', |
|
627 | 627 | x.comment, |
|
628 | 628 | x.author, |
|
629 | 629 | x.branch or b'', |
|
630 | 630 | x.date, |
|
631 | 631 | x.branchpoints, |
|
632 | 632 | ) |
|
633 | 633 | ) |
|
634 | 634 | |
|
635 | 635 | changesets = [] |
|
636 | 636 | files = set() |
|
637 | 637 | c = None |
|
638 | 638 | for i, e in enumerate(log): |
|
639 | 639 | |
|
640 | 640 | # Check if log entry belongs to the current changeset or not. |
|
641 | 641 | |
|
642 | 642 | # Since CVS is file-centric, two different file revisions with |
|
643 | 643 | # different branchpoints should be treated as belonging to two |
|
644 | 644 | # different changesets (and the ordering is important and not |
|
645 | 645 | # honoured by cvsps at this point). |
|
646 | 646 | # |
|
647 | 647 | # Consider the following case: |
|
648 | 648 | # foo 1.1 branchpoints: [MYBRANCH] |
|
649 | 649 | # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2] |
|
650 | 650 | # |
|
651 | 651 | # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a |
|
652 | 652 | # later version of foo may be in MYBRANCH2, so foo should be the |
|
653 | 653 | # first changeset and bar the next and MYBRANCH and MYBRANCH2 |
|
654 | 654 | # should both start off of the bar changeset. No provisions are |
|
655 | 655 | # made to ensure that this is, in fact, what happens. |
|
656 | 656 | if not ( |
|
657 | 657 | c |
|
658 | 658 | and e.branchpoints == c.branchpoints |
|
659 | 659 | and ( # cvs commitids |
|
660 | 660 | (e.commitid is not None and e.commitid == c.commitid) |
|
661 | 661 | or ( # no commitids, use fuzzy commit detection |
|
662 | 662 | (e.commitid is None or c.commitid is None) |
|
663 | 663 | and e.comment == c.comment |
|
664 | 664 | and e.author == c.author |
|
665 | 665 | and e.branch == c.branch |
|
666 | 666 | and ( |
|
667 | 667 | (c.date[0] + c.date[1]) |
|
668 | 668 | <= (e.date[0] + e.date[1]) |
|
669 | 669 | <= (c.date[0] + c.date[1]) + fuzz |
|
670 | 670 | ) |
|
671 | 671 | and e.file not in files |
|
672 | 672 | ) |
|
673 | 673 | ) |
|
674 | 674 | ): |
|
675 | 675 | c = changeset( |
|
676 | 676 | comment=e.comment, |
|
677 | 677 | author=e.author, |
|
678 | 678 | branch=e.branch, |
|
679 | 679 | date=e.date, |
|
680 | 680 | entries=[], |
|
681 | 681 | mergepoint=e.mergepoint, |
|
682 | 682 | branchpoints=e.branchpoints, |
|
683 | 683 | commitid=e.commitid, |
|
684 | 684 | ) |
|
685 | 685 | changesets.append(c) |
|
686 | 686 | |
|
687 | 687 | files = set() |
|
688 | 688 | if len(changesets) % 100 == 0: |
|
689 | 689 | t = b'%d %s' % (len(changesets), repr(e.comment)[1:-1]) |
|
690 | 690 | ui.status(stringutil.ellipsis(t, 80) + b'\n') |
|
691 | 691 | |
|
692 | 692 | c.entries.append(e) |
|
693 | 693 | files.add(e.file) |
|
694 | 694 | c.date = e.date # changeset date is date of latest commit in it |
|
695 | 695 | |
|
696 | 696 | # Mark synthetic changesets |
|
697 | 697 | |
|
698 | 698 | for c in changesets: |
|
699 | 699 | # Synthetic revisions always get their own changeset, because |
|
700 | 700 | # the log message includes the filename. E.g. if you add file3 |
|
701 | 701 | # and file4 on a branch, you get four log entries and three |
|
702 | 702 | # changesets: |
|
703 | 703 | # "File file3 was added on branch ..." (synthetic, 1 entry) |
|
704 | 704 | # "File file4 was added on branch ..." (synthetic, 1 entry) |
|
705 | 705 | # "Add file3 and file4 to fix ..." (real, 2 entries) |
|
706 | 706 | # Hence the check for 1 entry here. |
|
707 | 707 | c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic |
|
708 | 708 | |
|
709 | 709 | # Sort files in each changeset |
|
710 | 710 | |
|
711 | 711 | def entitycompare(l, r): |
|
712 | 712 | """Mimic cvsps sorting order""" |
|
713 | 713 | l = l.file.split(b'/') |
|
714 | 714 | r = r.file.split(b'/') |
|
715 | 715 | nl = len(l) |
|
716 | 716 | nr = len(r) |
|
717 | 717 | n = min(nl, nr) |
|
718 | 718 | for i in range(n): |
|
719 | 719 | if i + 1 == nl and nl < nr: |
|
720 | 720 | return -1 |
|
721 | 721 | elif i + 1 == nr and nl > nr: |
|
722 | 722 | return +1 |
|
723 | 723 | elif l[i] < r[i]: |
|
724 | 724 | return -1 |
|
725 | 725 | elif l[i] > r[i]: |
|
726 | 726 | return +1 |
|
727 | 727 | return 0 |
|
728 | 728 | |
|
729 | 729 | for c in changesets: |
|
730 | 730 | c.entries.sort(key=functools.cmp_to_key(entitycompare)) |
|
731 | 731 | |
|
732 | 732 | # Sort changesets by date |
|
733 | 733 | |
|
734 | 734 | odd = set() |
|
735 | 735 | |
|
736 | 736 | def cscmp(l, r): |
|
737 | 737 | d = sum(l.date) - sum(r.date) |
|
738 | 738 | if d: |
|
739 | 739 | return d |
|
740 | 740 | |
|
741 | 741 | # detect vendor branches and initial commits on a branch |
|
742 | 742 | le = {} |
|
743 | 743 | for e in l.entries: |
|
744 | 744 | le[e.rcs] = e.revision |
|
745 | 745 | re = {} |
|
746 | 746 | for e in r.entries: |
|
747 | 747 | re[e.rcs] = e.revision |
|
748 | 748 | |
|
749 | 749 | d = 0 |
|
750 | 750 | for e in l.entries: |
|
751 | 751 | if re.get(e.rcs, None) == e.parent: |
|
752 | 752 | assert not d |
|
753 | 753 | d = 1 |
|
754 | 754 | break |
|
755 | 755 | |
|
756 | 756 | for e in r.entries: |
|
757 | 757 | if le.get(e.rcs, None) == e.parent: |
|
758 | 758 | if d: |
|
759 | 759 | odd.add((l, r)) |
|
760 | 760 | d = -1 |
|
761 | 761 | break |
|
762 | 762 | # By this point, the changesets are sufficiently compared that |
|
763 | 763 | # we don't really care about ordering. However, this leaves |
|
764 | 764 | # some race conditions in the tests, so we compare on the |
|
765 | 765 | # number of files modified, the files contained in each |
|
766 | 766 | # changeset, and the branchpoints in the change to ensure test |
|
767 | 767 | # output remains stable. |
|
768 | 768 | |
|
769 | 769 | # recommended replacement for cmp from |
|
770 | 770 | # https://docs.python.org/3.0/whatsnew/3.0.html |
|
771 | 771 | c = lambda x, y: (x > y) - (x < y) |
|
772 | 772 | # Sort bigger changes first. |
|
773 | 773 | if not d: |
|
774 | 774 | d = c(len(l.entries), len(r.entries)) |
|
775 | 775 | # Try sorting by filename in the change. |
|
776 | 776 | if not d: |
|
777 | 777 | d = c([e.file for e in l.entries], [e.file for e in r.entries]) |
|
778 | 778 | # Try and put changes without a branch point before ones with |
|
779 | 779 | # a branch point. |
|
780 | 780 | if not d: |
|
781 | 781 | d = c(len(l.branchpoints), len(r.branchpoints)) |
|
782 | 782 | return d |
|
783 | 783 | |
|
784 | 784 | changesets.sort(key=functools.cmp_to_key(cscmp)) |
|
785 | 785 | |
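
Both sorts above pass a Python-2-style three-way comparison through functools.cmp_to_key(), with (x > y) - (x < y) standing in for the removed cmp() builtin. A self-contained sketch of the pattern on toy data (not the converter's types):

    import functools

    def cmp3(x, y):
        # negative / zero / positive, like Python 2's cmp()
        return (x > y) - (x < y)

    def order(l, r):
        # primary key: date; then a deterministic tie-breaker, in the
        # spirit of cscmp above
        return cmp3(l['date'], r['date']) or cmp3(len(l['entries']), len(r['entries']))

    sets = [{'date': 5, 'entries': [1]}, {'date': 5, 'entries': []}, {'date': 1, 'entries': []}]
    sets.sort(key=functools.cmp_to_key(order))
    print([(c['date'], len(c['entries'])) for c in sets])  # -> [(1, 0), (5, 0), (5, 1)]
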
|
786 | 786 | # Collect tags |
|
787 | 787 | |
|
788 | 788 | globaltags = {} |
|
789 | 789 | for c in changesets: |
|
790 | 790 | for e in c.entries: |
|
791 | 791 | for tag in e.tags: |
|
792 | 792 | # remember which is the latest changeset to have this tag |
|
793 | 793 | globaltags[tag] = c |
|
794 | 794 | |
|
795 | 795 | for c in changesets: |
|
796 | 796 | tags = set() |
|
797 | 797 | for e in c.entries: |
|
798 | 798 | tags.update(e.tags) |
|
799 | 799 | # remember tags only if this is the latest changeset to have it |
|
800 | 800 | c.tags = sorted(tag for tag in tags if globaltags[tag] is c) |
|
801 | 801 | |
|
802 | 802 | # Find parent changesets, handle {{mergetobranch BRANCHNAME}} |
|
803 | 803 | # by inserting dummy changesets with two parents, and handle |
|
804 | 804 | # {{mergefrombranch BRANCHNAME}} by setting two parents. |
|
805 | 805 | |
|
806 | 806 | if mergeto is None: |
|
807 | 807 | mergeto = br'{{mergetobranch ([-\w]+)}}' |
|
808 | 808 | if mergeto: |
|
809 | 809 | mergeto = re.compile(mergeto) |
|
810 | 810 | |
|
811 | 811 | if mergefrom is None: |
|
812 | 812 | mergefrom = br'{{mergefrombranch ([-\w]+)}}' |
|
813 | 813 | if mergefrom: |
|
814 | 814 | mergefrom = re.compile(mergefrom) |
|
815 | 815 | |
|
816 | 816 | versions = {} # changeset index where we saw any particular file version |
|
817 | 817 | branches = {} # changeset index where we saw a branch |
|
818 | 818 | n = len(changesets) |
|
819 | 819 | i = 0 |
|
820 | 820 | while i < n: |
|
821 | 821 | c = changesets[i] |
|
822 | 822 | |
|
823 | 823 | for f in c.entries: |
|
824 | 824 | versions[(f.rcs, f.revision)] = i |
|
825 | 825 | |
|
826 | 826 | p = None |
|
827 | 827 | if c.branch in branches: |
|
828 | 828 | p = branches[c.branch] |
|
829 | 829 | else: |
|
830 | 830 | # first changeset on a new branch |
|
831 | 831 | # the parent is a changeset with the branch in its |
|
832 | 832 | # branchpoints such that it is the latest possible |
|
833 | 833 | # commit without any intervening, unrelated commits. |
|
834 | 834 | |
|
835 | 835 | for candidate in pycompat.xrange(i): |
|
836 | 836 | if c.branch not in changesets[candidate].branchpoints: |
|
837 | 837 | if p is not None: |
|
838 | 838 | break |
|
839 | 839 | continue |
|
840 | 840 | p = candidate |
|
841 | 841 | |
|
842 | 842 | c.parents = [] |
|
843 | 843 | if p is not None: |
|
844 | 844 | p = changesets[p] |
|
845 | 845 | |
|
846 | 846 | # Ensure no changeset has a synthetic changeset as a parent. |
|
847 | 847 | while p.synthetic: |
|
848 | 848 | assert len(p.parents) <= 1, _( |
|
849 | 849 | b'synthetic changeset cannot have multiple parents' |
|
850 | 850 | ) |
|
851 | 851 | if p.parents: |
|
852 | 852 | p = p.parents[0] |
|
853 | 853 | else: |
|
854 | 854 | p = None |
|
855 | 855 | break |
|
856 | 856 | |
|
857 | 857 | if p is not None: |
|
858 | 858 | c.parents.append(p) |
|
859 | 859 | |
|
860 | 860 | if c.mergepoint: |
|
861 | 861 | if c.mergepoint == b'HEAD': |
|
862 | 862 | c.mergepoint = None |
|
863 | 863 | c.parents.append(changesets[branches[c.mergepoint]]) |
|
864 | 864 | |
|
865 | 865 | if mergefrom: |
|
866 | 866 | m = mergefrom.search(c.comment) |
|
867 | 867 | if m: |
|
868 | 868 | m = m.group(1) |
|
869 | 869 | if m == b'HEAD': |
|
870 | 870 | m = None |
|
871 | 871 | try: |
|
872 | 872 | candidate = changesets[branches[m]] |
|
873 | 873 | except KeyError: |
|
874 | 874 | ui.warn( |
|
875 | 875 | _( |
|
876 | 876 | b"warning: CVS commit message references " |
|
877 | 877 | b"non-existent branch %r:\n%s\n" |
|
878 | 878 | ) |
|
879 | 879 | % (pycompat.bytestr(m), c.comment) |
|
880 | 880 | ) |
|
881 | 881 | if m in branches and c.branch != m and not candidate.synthetic: |
|
882 | 882 | c.parents.append(candidate) |
|
883 | 883 | |
|
884 | 884 | if mergeto: |
|
885 | 885 | m = mergeto.search(c.comment) |
|
886 | 886 | if m: |
|
887 | 887 | if m.groups(): |
|
888 | 888 | m = m.group(1) |
|
889 | 889 | if m == b'HEAD': |
|
890 | 890 | m = None |
|
891 | 891 | else: |
|
892 | 892 | m = None # if no group found then merge to HEAD |
|
893 | 893 | if m in branches and c.branch != m: |
|
894 | 894 | # insert empty changeset for merge |
|
895 | 895 | cc = changeset( |
|
896 | 896 | author=c.author, |
|
897 | 897 | branch=m, |
|
898 | 898 | date=c.date, |
|
899 | 899 | comment=b'convert-repo: CVS merge from branch %s' |
|
900 | 900 | % c.branch, |
|
901 | 901 | entries=[], |
|
902 | 902 | tags=[], |
|
903 | 903 | parents=[changesets[branches[m]], c], |
|
904 | 904 | ) |
|
905 | 905 | changesets.insert(i + 1, cc) |
|
906 | 906 | branches[m] = i + 1 |
|
907 | 907 | |
|
908 | 908 | # adjust our loop counters now we have inserted a new entry |
|
909 | 909 | n += 1 |
|
910 | 910 | i += 2 |
|
911 | 911 | continue |
|
912 | 912 | |
|
913 | 913 | branches[c.branch] = i |
|
914 | 914 | i += 1 |
|
915 | 915 | |
|
916 | 916 | # Drop synthetic changesets (safe now that we have ensured no other |
|
917 | 917 | # changesets can have them as parents). |
|
918 | 918 | i = 0 |
|
919 | 919 | while i < len(changesets): |
|
920 | 920 | if changesets[i].synthetic: |
|
921 | 921 | del changesets[i] |
|
922 | 922 | else: |
|
923 | 923 | i += 1 |
|
924 | 924 | |
|
925 | 925 | # Number changesets |
|
926 | 926 | |
|
927 | 927 | for i, c in enumerate(changesets): |
|
928 | 928 | c.id = i + 1 |
|
929 | 929 | |
|
930 | 930 | if odd: |
|
931 | 931 | for l, r in odd: |
|
932 | 932 | if l.id is not None and r.id is not None: |
|
933 | 933 | ui.warn( |
|
934 | 934 | _(b'changeset %d is both before and after %d\n') |
|
935 | 935 | % (l.id, r.id) |
|
936 | 936 | ) |
|
937 | 937 | |
|
938 | 938 | ui.status(_(b'%d changeset entries\n') % len(changesets)) |
|
939 | 939 | |
|
940 | 940 | hook.hook(ui, None, b"cvschangesets", True, changesets=changesets) |
|
941 | 941 | |
|
942 | 942 | return changesets |
|
943 | 943 | |
|
944 | 944 | |
|
945 | 945 | def debugcvsps(ui, *args, **opts): |
|
946 | 946 | """Read CVS rlog for current directory or named path in |
|
947 | 947 | repository, and convert the log to changesets based on matching |
|
948 | 948 | commit log entries and dates. |
|
949 | 949 | """ |
|
950 | 950 | opts = pycompat.byteskwargs(opts) |
|
951 | 951 | if opts[b"new_cache"]: |
|
952 | 952 | cache = b"write" |
|
953 | 953 | elif opts[b"update_cache"]: |
|
954 | 954 | cache = b"update" |
|
955 | 955 | else: |
|
956 | 956 | cache = None |
|
957 | 957 | |
|
958 | 958 | revisions = opts[b"revisions"] |
|
959 | 959 | |
|
960 | 960 | try: |
|
961 | 961 | if args: |
|
962 | 962 | log = [] |
|
963 | 963 | for d in args: |
|
964 | 964 | log += createlog(ui, d, root=opts[b"root"], cache=cache) |
|
965 | 965 | else: |
|
966 | 966 | log = createlog(ui, root=opts[b"root"], cache=cache) |
|
967 | 967 | except logerror as e: |
|
968 | 968 | ui.write(b"%r\n" % e) |
|
969 | 969 | return |
|
970 | 970 | |
|
971 | 971 | changesets = createchangeset(ui, log, opts[b"fuzz"]) |
|
972 | 972 | del log |
|
973 | 973 | |
|
974 | 974 | # Print changesets (optionally filtered) |
|
975 | 975 | |
|
976 | 976 | off = len(revisions) |
|
977 | 977 | branches = {} # latest version number in each branch |
|
978 | 978 | ancestors = {} # parent branch |
|
979 | 979 | for cs in changesets: |
|
980 | 980 | |
|
981 | 981 | if opts[b"ancestors"]: |
|
982 | 982 | if cs.branch not in branches and cs.parents and cs.parents[0].id: |
|
983 | 983 | ancestors[cs.branch] = ( |
|
984 | 984 | changesets[cs.parents[0].id - 1].branch, |
|
985 | 985 | cs.parents[0].id, |
|
986 | 986 | ) |
|
987 | 987 | branches[cs.branch] = cs.id |
|
988 | 988 | |
|
989 | 989 | # limit by branches |
|
990 | 990 | if ( |
|
991 | 991 | opts[b"branches"] |
|
992 | 992 | and (cs.branch or b'HEAD') not in opts[b"branches"] |
|
993 | 993 | ): |
|
994 | 994 | continue |
|
995 | 995 | |
|
996 | 996 | if not off: |
|
997 | 997 | # Note: trailing spaces on several lines here are needed to have |
|
998 | 998 | # bug-for-bug compatibility with cvsps. |
|
999 | 999 | ui.write(b'---------------------\n') |
|
1000 | 1000 | ui.write((b'PatchSet %d \n' % cs.id)) |
|
1001 | 1001 | ui.write( |
|
1002 | 1002 | ( |
|
1003 | 1003 | b'Date: %s\n' |
|
1004 | 1004 | % dateutil.datestr(cs.date, b'%Y/%m/%d %H:%M:%S %1%2') |
|
1005 | 1005 | ) |
|
1006 | 1006 | ) |
|
1007 | 1007 | ui.write((b'Author: %s\n' % cs.author)) |
|
1008 | 1008 | ui.write((b'Branch: %s\n' % (cs.branch or b'HEAD'))) |
|
1009 | 1009 | ui.write( |
|
1010 | 1010 | ( |
|
1011 | 1011 | b'Tag%s: %s \n' |
|
1012 | 1012 | % ( |
|
1013 | 1013 | [b'', b's'][len(cs.tags) > 1], |
|
1014 | 1014 | b','.join(cs.tags) or b'(none)', |
|
1015 | 1015 | ) |
|
1016 | 1016 | ) |
|
1017 | 1017 | ) |
|
1018 | 1018 | if cs.branchpoints: |
|
1019 | 1019 | ui.writenoi18n( |
|
1020 | 1020 | b'Branchpoints: %s \n' % b', '.join(sorted(cs.branchpoints)) |
|
1021 | 1021 | ) |
|
1022 | 1022 | if opts[b"parents"] and cs.parents: |
|
1023 | 1023 | if len(cs.parents) > 1: |
|
1024 | 1024 | ui.write( |
|
1025 | 1025 | ( |
|
1026 | 1026 | b'Parents: %s\n' |
|
1027 | 1027 | % (b','.join([(b"%d" % p.id) for p in cs.parents])) |
|
1028 | 1028 | ) |
|
1029 | 1029 | ) |
|
1030 | 1030 | else: |
|
1031 | 1031 | ui.write((b'Parent: %d\n' % cs.parents[0].id)) |
|
1032 | 1032 | |
|
1033 | 1033 | if opts[b"ancestors"]: |
|
1034 | 1034 | b = cs.branch |
|
1035 | 1035 | r = [] |
|
1036 | 1036 | while b: |
|
1037 | 1037 | b, c = ancestors[b] |
|
1038 | 1038 | r.append(b'%s:%d:%d' % (b or b"HEAD", c, branches[b])) |
|
1039 | 1039 | if r: |
|
1040 | 1040 | ui.write((b'Ancestors: %s\n' % (b','.join(r)))) |
|
1041 | 1041 | |
|
1042 | 1042 | ui.writenoi18n(b'Log:\n') |
|
1043 | 1043 | ui.write(b'%s\n\n' % cs.comment) |
|
1044 | 1044 | ui.writenoi18n(b'Members: \n') |
|
1045 | 1045 | for f in cs.entries: |
|
1046 | 1046 | fn = f.file |
|
1047 | 1047 | if fn.startswith(opts[b"prefix"]): |
|
1048 | 1048 | fn = fn[len(opts[b"prefix"]) :] |
|
1049 | 1049 | ui.write( |
|
1050 | 1050 | b'\t%s:%s->%s%s \n' |
|
1051 | 1051 | % ( |
|
1052 | 1052 | fn, |
|
1053 | 1053 | b'.'.join([b"%d" % x for x in f.parent]) or b'INITIAL', |
|
1054 | 1054 | b'.'.join([(b"%d" % x) for x in f.revision]), |
|
1055 | 1055 | [b'', b'(DEAD)'][f.dead], |
|
1056 | 1056 | ) |
|
1057 | 1057 | ) |
|
1058 | 1058 | ui.write(b'\n') |
|
1059 | 1059 | |
|
1060 | 1060 | # have we seen the start tag? |
|
1061 | 1061 | if revisions and off: |
|
1062 | 1062 | if revisions[0] == (b"%d" % cs.id) or revisions[0] in cs.tags: |
|
1063 | 1063 | off = False |
|
1064 | 1064 | |
|
1065 | 1065 | # see if we reached the end tag |
|
1066 | 1066 | if len(revisions) > 1 and not off: |
|
1067 | 1067 | if revisions[1] == (b"%d" % cs.id) or revisions[1] in cs.tags: |
|
1068 | 1068 | break |
@@ -1,497 +1,497 b'' | |||
|
1 | 1 | # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com> |
|
2 | 2 | # Copyright 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br> |
|
3 | 3 | # |
|
4 | 4 | # This software may be used and distributed according to the terms of the |
|
5 | 5 | # GNU General Public License version 2 or any later version. |
|
6 | 6 | |
|
7 | 7 | |
|
8 | 8 | import posixpath |
|
9 | 9 | |
|
10 | 10 | from mercurial.i18n import _ |
|
11 | 11 | from mercurial import ( |
|
12 | 12 | error, |
|
13 | 13 | pycompat, |
|
14 | 14 | ) |
|
15 | 15 | from . import common |
|
16 | 16 | |
|
17 | 17 | SKIPREV = common.SKIPREV |
|
18 | 18 | |
|
19 | 19 | |
|
20 | 20 | def rpairs(path): |
|
21 | 21 | """Yield tuples with path split at '/', starting with the full path. |
|
22 | 22 | No leading, trailing or double '/', please. |
|
23 | 23 | >>> for x in rpairs(b'foo/bar/baz'): print(x) |
|
24 | 24 | ('foo/bar/baz', '') |
|
25 | 25 | ('foo/bar', 'baz') |
|
26 | 26 | ('foo', 'bar/baz') |
|
27 | 27 | ('.', 'foo/bar/baz') |
|
28 | 28 | """ |
|
29 | 29 | i = len(path) |
|
30 | 30 | while i != -1: |
|
31 | 31 | yield path[:i], path[i + 1 :] |
|
32 | 32 | i = path.rfind(b'/', 0, i) |
|
33 | 33 | yield b'.', path |
|
34 | 34 | |
|
35 | 35 | |
|
36 | 36 | def normalize(path): |
|
37 | 37 | """We use posixpath.normpath to support cross-platform path format. |
|
38 | 38 | However, it doesn't handle None input. So we wrap it up.""" |
|
39 | 39 | if path is None: |
|
40 | 40 | return None |
|
41 | 41 | return posixpath.normpath(path) |
|
42 | 42 | |
|
43 | 43 | |
|
44 | 44 | class filemapper(object): |
|
45 | 45 | """Map and filter filenames when importing. |
|
46 | 46 | A name can be mapped to itself, a new name, or None (omit from new |
|
47 | 47 | repository).""" |
|
48 | 48 | |
|
49 | 49 | def __init__(self, ui, path=None): |
|
50 | 50 | self.ui = ui |
|
51 | 51 | self.include = {} |
|
52 | 52 | self.exclude = {} |
|
53 | 53 | self.rename = {} |
|
54 | 54 | self.targetprefixes = None |
|
55 | 55 | if path: |
|
56 | 56 | if self.parse(path): |
|
57 | 57 | raise error.Abort(_(b'errors in filemap')) |
|
58 | 58 | |
|
59 | 59 | def parse(self, path): |
|
60 | 60 | errs = 0 |
|
61 | 61 | |
|
62 | 62 | def check(name, mapping, listname): |
|
63 | 63 | if not name: |
|
64 | 64 | self.ui.warn( |
|
65 | 65 | _(b'%s:%d: path to %s is missing\n') |
|
66 | 66 | % (lex.infile, lex.lineno, listname) |
|
67 | 67 | ) |
|
68 | 68 | return 1 |
|
69 | 69 | if name in mapping: |
|
70 | 70 | self.ui.warn( |
|
71 | 71 | _(b'%s:%d: %r already in %s list\n') |
|
72 | 72 | % (lex.infile, lex.lineno, name, listname) |
|
73 | 73 | ) |
|
74 | 74 | return 1 |
|
75 | 75 | if name.startswith(b'/') or name.endswith(b'/') or b'//' in name: |
|
76 | 76 | self.ui.warn( |
|
77 | 77 | _(b'%s:%d: superfluous / in %s %r\n') |
|
78 | 78 | % (lex.infile, lex.lineno, listname, pycompat.bytestr(name)) |
|
79 | 79 | ) |
|
80 | 80 | return 1 |
|
81 | 81 | return 0 |
|
82 | 82 | |
|
83 | 83 | lex = common.shlexer( |
|
84 | 84 | filepath=path, wordchars=b'!@#$%^&*()-=+[]{}|;:,./<>?' |
|
85 | 85 | ) |
|
86 | 86 | cmd = lex.get_token() |
|
87 | 87 | while cmd: |
|
88 | 88 | if cmd == b'include': |
|
89 | 89 | name = normalize(lex.get_token()) |
|
90 | 90 | errs += check(name, self.exclude, b'exclude') |
|
91 | 91 | self.include[name] = name |
|
92 | 92 | elif cmd == b'exclude': |
|
93 | 93 | name = normalize(lex.get_token()) |
|
94 | 94 | errs += check(name, self.include, b'include') |
|
95 | 95 | errs += check(name, self.rename, b'rename') |
|
96 | 96 | self.exclude[name] = name |
|
97 | 97 | elif cmd == b'rename': |
|
98 | 98 | src = normalize(lex.get_token()) |
|
99 | 99 | dest = normalize(lex.get_token()) |
|
100 | 100 | errs += check(src, self.exclude, b'exclude') |
|
101 | 101 | self.rename[src] = dest |
|
102 | 102 | elif cmd == b'source': |
|
103 | 103 | errs += self.parse(normalize(lex.get_token())) |
|
104 | 104 | else: |
|
105 | 105 | self.ui.warn( |
|
106 | 106 | _(b'%s:%d: unknown directive %r\n') |
|
107 | 107 | % (lex.infile, lex.lineno, pycompat.bytestr(cmd)) |
|
108 | 108 | ) |
|
109 | 109 | errs += 1 |
|
110 | 110 | cmd = lex.get_token() |
|
111 | 111 | return errs |
|
112 | 112 | |
|
113 | 113 | def lookup(self, name, mapping): |
|
114 | 114 | name = normalize(name) |
|
115 | 115 | for pre, suf in rpairs(name): |
|
116 | 116 | try: |
|
117 | 117 | return mapping[pre], pre, suf |
|
118 | 118 | except KeyError: |
|
119 | 119 | pass |
|
120 | 120 | return b'', name, b'' |
|
121 | 121 | |
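
lookup() tries the prefixes produced by rpairs() from longest to shortest, so the most specific filemap entry wins. A standalone sketch with a toy rename map (the rpairs logic is copied from the top of this file):

    def rpairs(path):
        # yield (prefix, suffix) splits of path, longest prefix first
        i = len(path)
        while i != -1:
            yield path[:i], path[i + 1:]
            i = path.rfind(b'/', 0, i)
        yield b'.', path

    rename = {b'src': b'.', b'src/vendor': b'third_party'}

    def lookup(name, mapping):
        for pre, suf in rpairs(name):
            if pre in mapping:
                return mapping[pre], pre, suf
        return b'', name, b''

    print(lookup(b'src/vendor/lib.py', rename))  # (b'third_party', b'src/vendor', b'lib.py')
    print(lookup(b'src/main.py', rename))        # (b'.', b'src', b'main.py')
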
|
122 | 122 | def istargetfile(self, filename): |
|
123 | 123 | """Return true if the given target filename is covered as a destination |
|
124 | 124 | of the filemap. This is useful for identifying what parts of the target |
|
125 | 125 | repo belong to the source repo and what parts don't.""" |
|
126 | 126 | if self.targetprefixes is None: |
|
127 | 127 | self.targetprefixes = set() |
|
128 |     | for before, after in pycompat.iteritems(self.rename):

    | 128 | for before, after in self.rename.items():
|
129 | 129 | self.targetprefixes.add(after) |
|
130 | 130 | |
|
131 | 131 | # If "." is a target, then all target files are considered from the |
|
132 | 132 | # source. |
|
133 | 133 | if not self.targetprefixes or b'.' in self.targetprefixes: |
|
134 | 134 | return True |
|
135 | 135 | |
|
136 | 136 | filename = normalize(filename) |
|
137 | 137 | for pre, suf in rpairs(filename): |
|
138 | 138 | # This check is imperfect since it doesn't account for the |
|
139 | 139 | # include/exclude list, but it should work in filemaps that don't |
|
140 | 140 | # apply include/exclude to the same source directories they are |
|
141 | 141 | # renaming. |
|
142 | 142 | if pre in self.targetprefixes: |
|
143 | 143 | return True |
|
144 | 144 | return False |
|
145 | 145 | |
|
146 | 146 | def __call__(self, name): |
|
147 | 147 | if self.include: |
|
148 | 148 | inc = self.lookup(name, self.include)[0] |
|
149 | 149 | else: |
|
150 | 150 | inc = name |
|
151 | 151 | if self.exclude: |
|
152 | 152 | exc = self.lookup(name, self.exclude)[0] |
|
153 | 153 | else: |
|
154 | 154 | exc = b'' |
|
155 | 155 | if (not self.include and exc) or (len(inc) <= len(exc)): |
|
156 | 156 | return None |
|
157 | 157 | newpre, pre, suf = self.lookup(name, self.rename) |
|
158 | 158 | if newpre: |
|
159 | 159 | if newpre == b'.': |
|
160 | 160 | return suf |
|
161 | 161 | if suf: |
|
162 | 162 | if newpre.endswith(b'/'): |
|
163 | 163 | return newpre + suf |
|
164 | 164 | return newpre + b'/' + suf |
|
165 | 165 | return newpre |
|
166 | 166 | return name |
|
167 | 167 | |
|
168 | 168 | def active(self): |
|
169 | 169 | return bool(self.include or self.exclude or self.rename) |
|
170 | 170 | |
|
171 | 171 | |
|
172 | 172 | # This class does two additional things compared to a regular source: |
|
173 | 173 | # |
|
174 | 174 | # - Filter and rename files. This is mostly wrapped by the filemapper |
|
175 | 175 | # class above. We hide the original filename in the revision that is |
|
176 | 176 | # returned by getchanges to be able to find things later in getfile. |
|
177 | 177 | # |
|
178 | 178 | # - Return only revisions that matter for the files we're interested in. |
|
179 | 179 | # This involves rewriting the parents of the original revision to |
|
180 | 180 | # create a graph that is restricted to those revisions. |
|
181 | 181 | # |
|
182 | 182 | # This set of revisions includes not only revisions that directly |
|
183 | 183 | # touch files we're interested in, but also merges that merge two |
|
184 | 184 | # or more interesting revisions. |
|
185 | 185 | |
|
186 | 186 | |
|
187 | 187 | class filemap_source(common.converter_source): |
|
188 | 188 | def __init__(self, ui, baseconverter, filemap): |
|
189 | 189 | super(filemap_source, self).__init__(ui, baseconverter.repotype) |
|
190 | 190 | self.base = baseconverter |
|
191 | 191 | self.filemapper = filemapper(ui, filemap) |
|
192 | 192 | self.commits = {} |
|
193 | 193 | # if a revision rev has parent p in the original revision graph, then |
|
194 | 194 | # rev will have parent self.parentmap[p] in the restricted graph. |
|
195 | 195 | self.parentmap = {} |
|
196 | 196 | # self.wantedancestors[rev] is the set of all ancestors of rev that |
|
197 | 197 | # are in the restricted graph. |
|
198 | 198 | self.wantedancestors = {} |
|
199 | 199 | self.convertedorder = None |
|
200 | 200 | self._rebuilt = False |
|
201 | 201 | self.origparents = {} |
|
202 | 202 | self.children = {} |
|
203 | 203 | self.seenchildren = {} |
|
204 | 204 | # experimental config: convert.ignoreancestorcheck |
|
205 | 205 | self.ignoreancestorcheck = self.ui.configbool( |
|
206 | 206 | b'convert', b'ignoreancestorcheck' |
|
207 | 207 | ) |
|
208 | 208 | |
|
209 | 209 | def before(self): |
|
210 | 210 | self.base.before() |
|
211 | 211 | |
|
212 | 212 | def after(self): |
|
213 | 213 | self.base.after() |
|
214 | 214 | |
|
215 | 215 | def setrevmap(self, revmap): |
|
216 | 216 | # rebuild our state to make things restartable |
|
217 | 217 | # |
|
218 | 218 | # To avoid calling getcommit for every revision that has already |
|
219 | 219 | # been converted, we rebuild only the parentmap, delaying the |
|
220 | 220 | # rebuild of wantedancestors until we need it (i.e. until a |
|
221 | 221 | # merge). |
|
222 | 222 | # |
|
223 | 223 | # We assume the order argument lists the revisions in |
|
224 | 224 | # topological order, so that we can infer which revisions were |
|
225 | 225 | # wanted by previous runs. |
|
226 | 226 | self._rebuilt = not revmap |
|
227 | 227 | seen = {SKIPREV: SKIPREV} |
|
228 | 228 | dummyset = set() |
|
229 | 229 | converted = [] |
|
230 | 230 | for rev in revmap.order: |
|
231 | 231 | mapped = revmap[rev] |
|
232 | 232 | wanted = mapped not in seen |
|
233 | 233 | if wanted: |
|
234 | 234 | seen[mapped] = rev |
|
235 | 235 | self.parentmap[rev] = rev |
|
236 | 236 | else: |
|
237 | 237 | self.parentmap[rev] = seen[mapped] |
|
238 | 238 | self.wantedancestors[rev] = dummyset |
|
239 | 239 | arg = seen[mapped] |
|
240 | 240 | if arg == SKIPREV: |
|
241 | 241 | arg = None |
|
242 | 242 | converted.append((rev, wanted, arg)) |
|
243 | 243 | self.convertedorder = converted |
|
244 | 244 | return self.base.setrevmap(revmap) |
|
245 | 245 | |
|
246 | 246 | def rebuild(self): |
|
247 | 247 | if self._rebuilt: |
|
248 | 248 | return True |
|
249 | 249 | self._rebuilt = True |
|
250 | 250 | self.parentmap.clear() |
|
251 | 251 | self.wantedancestors.clear() |
|
252 | 252 | self.seenchildren.clear() |
|
253 | 253 | for rev, wanted, arg in self.convertedorder: |
|
254 | 254 | if rev not in self.origparents: |
|
255 | 255 | try: |
|
256 | 256 | self.origparents[rev] = self.getcommit(rev).parents |
|
257 | 257 | except error.RepoLookupError: |
|
258 | 258 | self.ui.debug(b"unknown revmap source: %s\n" % rev) |
|
259 | 259 | continue |
|
260 | 260 | if arg is not None: |
|
261 | 261 | self.children[arg] = self.children.get(arg, 0) + 1 |
|
262 | 262 | |
|
263 | 263 | for rev, wanted, arg in self.convertedorder: |
|
264 | 264 | try: |
|
265 | 265 | parents = self.origparents[rev] |
|
266 | 266 | except KeyError: |
|
267 | 267 | continue # unknown revmap source |
|
268 | 268 | if wanted: |
|
269 | 269 | self.mark_wanted(rev, parents) |
|
270 | 270 | else: |
|
271 | 271 | self.mark_not_wanted(rev, arg) |
|
272 | 272 | self._discard(arg, *parents) |
|
273 | 273 | |
|
274 | 274 | return True |
|
275 | 275 | |
|
276 | 276 | def getheads(self): |
|
277 | 277 | return self.base.getheads() |
|
278 | 278 | |
|
279 | 279 | def getcommit(self, rev): |
|
280 | 280 | # We want to save a reference to the commit objects to be able |
|
281 | 281 | # to rewrite their parents later on. |
|
282 | 282 | c = self.commits[rev] = self.base.getcommit(rev) |
|
283 | 283 | for p in c.parents: |
|
284 | 284 | self.children[p] = self.children.get(p, 0) + 1 |
|
285 | 285 | return c |
|
286 | 286 | |
|
287 | 287 | def numcommits(self): |
|
288 | 288 | return self.base.numcommits() |
|
289 | 289 | |
|
290 | 290 | def _cachedcommit(self, rev): |
|
291 | 291 | if rev in self.commits: |
|
292 | 292 | return self.commits[rev] |
|
293 | 293 | return self.base.getcommit(rev) |
|
294 | 294 | |
|
295 | 295 | def _discard(self, *revs): |
|
296 | 296 | for r in revs: |
|
297 | 297 | if r is None: |
|
298 | 298 | continue |
|
299 | 299 | self.seenchildren[r] = self.seenchildren.get(r, 0) + 1 |
|
300 | 300 | if self.seenchildren[r] == self.children[r]: |
|
301 | 301 | self.wantedancestors.pop(r, None) |
|
302 | 302 | self.parentmap.pop(r, None) |
|
303 | 303 | del self.seenchildren[r] |
|
304 | 304 | if self._rebuilt: |
|
305 | 305 | del self.children[r] |
|
306 | 306 | |
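`_discard()` above is a small reference counter: a revision's cached state is freed only once as many of its children have been seen as exist. A toy version of the same idea (names and data here are illustrative, not the extension's API):

```python
children = {'a': 2, 'b': 1}             # total number of children per revision
seenchildren = {}
state = {'a': 'info-a', 'b': 'info-b'}  # per-revision data worth freeing

def discard(rev):
    seenchildren[rev] = seenchildren.get(rev, 0) + 1
    if seenchildren[rev] == children[rev]:
        # every child has now consumed this revision's data
        state.pop(rev, None)
        del seenchildren[rev]

discard('a')
discard('b')
assert 'b' not in state and 'a' in state  # 'a' still has one child pending
discard('a')
assert 'a' not in state
```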
|
307 | 307 | def wanted(self, rev, i): |
|
308 | 308 | # Return True if we're directly interested in rev. |
|
309 | 309 | # |
|
310 | 310 | # i is an index selecting one of the parents of rev (if rev |
|
311 | 311 | # has no parents, i is None). getchangedfiles will give us |
|
312 | 312 | # the list of files that are different in rev and in the parent |
|
313 | 313 | # indicated by i. If we're interested in any of these files, |
|
314 | 314 | # we're interested in rev. |
|
315 | 315 | try: |
|
316 | 316 | files = self.base.getchangedfiles(rev, i) |
|
317 | 317 | except NotImplementedError: |
|
318 | 318 | raise error.Abort(_(b"source repository doesn't support --filemap")) |
|
319 | 319 | for f in files: |
|
320 | 320 | if self.filemapper(f): |
|
321 | 321 | return True |
|
322 | 322 | |
|
323 | 323 | # The include directive is documented to include nothing else (though |
|
324 | 324 | # valid branch closes are included). |
|
325 | 325 | if self.filemapper.include: |
|
326 | 326 | return False |
|
327 | 327 | |
|
328 | 328 | # Allow empty commits in the source revision through. The getchanges() |
|
329 | 329 | # method doesn't even bother calling this if it determines that the |
|
330 | 330 | # close marker is significant (i.e. all of the branch ancestors weren't |
|
331 | 331 | # eliminated). Therefore if there *is* a close marker, getchanges() |
|
332 | 332 | # doesn't consider it significant, and this revision should be dropped. |
|
333 | 333 | return not files and b'close' not in self.commits[rev].extra |
|
334 | 334 | |
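Boiled down, `wanted()` is a three-step predicate. A sketch with the filemapper reduced to a plain `keep()` function and the close marker to a boolean (all names illustrative):

```python
def wanted(changedfiles, keep, has_includes, is_close):
    # interesting if any changed file survives the file mapping
    if any(keep(f) for f in changedfiles):
        return True
    # with explicit includes, nothing else is interesting ...
    if has_includes:
        return False
    # ... otherwise keep empty commits whose close marker is insignificant
    return not changedfiles and not is_close

keep = lambda f: f.startswith('src/')
print(wanted(['src/a.c', 'README'], keep, True, False))  # True
print(wanted(['README'], keep, True, False))             # False
print(wanted([], keep, False, False))                    # True
```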
|
335 | 335 | def mark_not_wanted(self, rev, p): |
|
336 | 336 | # Mark rev as not interesting and update data structures. |
|
337 | 337 | |
|
338 | 338 | if p is None: |
|
339 | 339 | # A root revision. Use SKIPREV to indicate that it doesn't |
|
340 | 340 | # map to any revision in the restricted graph. Put SKIPREV |
|
341 | 341 | # in the set of wanted ancestors to simplify code elsewhere.
|
342 | 342 | self.parentmap[rev] = SKIPREV |
|
343 | 343 | self.wantedancestors[rev] = {SKIPREV} |
|
344 | 344 | return |
|
345 | 345 | |
|
346 | 346 | # Reuse the data from our parent. |
|
347 | 347 | self.parentmap[rev] = self.parentmap[p] |
|
348 | 348 | self.wantedancestors[rev] = self.wantedancestors[p] |
|
349 | 349 | |
|
350 | 350 | def mark_wanted(self, rev, parents): |
|
351 | 351 | # Mark rev as wanted and update data structures.
|
352 | 352 | |
|
353 | 353 | # rev will be in the restricted graph, so children of rev in |
|
354 | 354 | # the original graph should still have rev as a parent in the |
|
355 | 355 | # restricted graph. |
|
356 | 356 | self.parentmap[rev] = rev |
|
357 | 357 | |
|
358 | 358 | # The set of wanted ancestors of rev is the union of the sets |
|
359 | 359 | # of wanted ancestors of its parents. Plus rev itself. |
|
360 | 360 | wrev = set() |
|
361 | 361 | for p in parents: |
|
362 | 362 | if p in self.wantedancestors: |
|
363 | 363 | wrev.update(self.wantedancestors[p]) |
|
364 | 364 | else: |
|
365 | 365 | self.ui.warn( |
|
366 | 366 | _(b'warning: %s parent %s is missing\n') % (rev, p) |
|
367 | 367 | ) |
|
368 | 368 | wrev.add(rev) |
|
369 | 369 | self.wantedancestors[rev] = wrev |
|
370 | 370 | |
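Because each wanted revision's set is the union of its parents' sets plus itself, ancestor tests in the restricted graph later reduce to constant-time set membership. A self-contained sketch of that bookkeeping:

```python
wantedancestors = {}

def mark_wanted(rev, parents):
    wrev = set()
    for p in parents:
        wrev.update(wantedancestors.get(p, set()))
    wrev.add(rev)
    wantedancestors[rev] = wrev

mark_wanted('r1', [])
mark_wanted('r2', ['r1'])
mark_wanted('r3', ['r1'])
mark_wanted('r4', ['r2', 'r3'])
assert wantedancestors['r4'] == {'r1', 'r2', 'r3', 'r4'}
# getchanges() can now test "mp1 in self.wantedancestors[p2]" in O(1)
```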
|
371 | 371 | def getchanges(self, rev, full): |
|
372 | 372 | parents = self.commits[rev].parents |
|
373 | 373 | if len(parents) > 1 and not self.ignoreancestorcheck: |
|
374 | 374 | self.rebuild() |
|
375 | 375 | |
|
376 | 376 | # To decide whether we're interested in rev we: |
|
377 | 377 | # |
|
378 | 378 | # - calculate what parents rev will have if it turns out we're |
|
379 | 379 | # interested in it. If it's going to have more than 1 parent, |
|
380 | 380 | # we're interested in it. |
|
381 | 381 | # |
|
382 | 382 | # - otherwise, we'll compare it with the single parent we found. |
|
383 | 383 | # If any of the files we're interested in is different in the |
|
384 | 384 | # two revisions, we're interested in rev.
|
385 | 385 | |
|
386 | 386 | # A parent p is interesting if its mapped version (self.parentmap[p]): |
|
387 | 387 | # - is not SKIPREV |
|
388 | 388 | # - is still not in the list of parents (we don't want duplicates) |
|
389 | 389 | # - is not an ancestor of the mapped versions of the other parents or |
|
390 | 390 | # there is no parent in the same branch as the current revision.
|
391 | 391 | mparents = [] |
|
392 | 392 | knownparents = set() |
|
393 | 393 | branch = self.commits[rev].branch |
|
394 | 394 | hasbranchparent = False |
|
395 | 395 | for i, p1 in enumerate(parents): |
|
396 | 396 | mp1 = self.parentmap[p1] |
|
397 | 397 | if mp1 == SKIPREV or mp1 in knownparents: |
|
398 | 398 | continue |
|
399 | 399 | |
|
400 | 400 | isancestor = not self.ignoreancestorcheck and any( |
|
401 | 401 | p2 |
|
402 | 402 | for p2 in parents |
|
403 | 403 | if p1 != p2 |
|
404 | 404 | and mp1 != self.parentmap[p2] |
|
405 | 405 | and mp1 in self.wantedancestors[p2] |
|
406 | 406 | ) |
|
407 | 407 | if not isancestor and not hasbranchparent and len(parents) > 1: |
|
408 | 408 | # This could be expensive, avoid unnecessary calls. |
|
409 | 409 | if self._cachedcommit(p1).branch == branch: |
|
410 | 410 | hasbranchparent = True |
|
411 | 411 | mparents.append((p1, mp1, i, isancestor)) |
|
412 | 412 | knownparents.add(mp1) |
|
413 | 413 | # Discard parents that are ancestors of other parents if there is a

414 | 414 | # non-ancestor one on the same branch as the current revision.
|
415 | 415 | if hasbranchparent: |
|
416 | 416 | mparents = [p for p in mparents if not p[3]] |
|
417 | 417 | wp = None |
|
418 | 418 | if mparents: |
|
419 | 419 | wp = max(p[2] for p in mparents) |
|
420 | 420 | mparents = [p[1] for p in mparents] |
|
421 | 421 | elif parents: |
|
422 | 422 | wp = 0 |
|
423 | 423 | |
|
424 | 424 | self.origparents[rev] = parents |
|
425 | 425 | |
|
426 | 426 | closed = False |
|
427 | 427 | if b'close' in self.commits[rev].extra: |
|
428 | 428 | # A branch closing revision is only useful if one of its |
|
429 | 429 | # parents belong to the branch being closed |
|
430 | 430 | pbranches = [self._cachedcommit(p).branch for p in mparents] |
|
431 | 431 | if branch in pbranches: |
|
432 | 432 | closed = True |
|
433 | 433 | |
|
434 | 434 | if len(mparents) < 2 and not closed and not self.wanted(rev, wp): |
|
435 | 435 | # We don't want this revision. |
|
436 | 436 | # Update our state and tell the convert process to map this |
|
437 | 437 | # revision to the same revision its parent is mapped to.
|
438 | 438 | p = None |
|
439 | 439 | if parents: |
|
440 | 440 | p = parents[wp] |
|
441 | 441 | self.mark_not_wanted(rev, p) |
|
442 | 442 | self.convertedorder.append((rev, False, p)) |
|
443 | 443 | self._discard(*parents) |
|
444 | 444 | return self.parentmap[rev] |
|
445 | 445 | |
|
446 | 446 | # We want this revision. |
|
447 | 447 | # Rewrite the parents of the commit object |
|
448 | 448 | self.commits[rev].parents = mparents |
|
449 | 449 | self.mark_wanted(rev, parents) |
|
450 | 450 | self.convertedorder.append((rev, True, None)) |
|
451 | 451 | self._discard(*parents) |
|
452 | 452 | |
|
453 | 453 | # Get the real changes and do the filtering/mapping. To be |
|
454 | 454 | # able to get the files later on in getfile, we hide the |
|
455 | 455 | # original filename in the rev part of the return value. |
|
456 | 456 | changes, copies, cleanp2 = self.base.getchanges(rev, full) |
|
457 | 457 | files = {} |
|
458 | 458 | ncleanp2 = set(cleanp2) |
|
459 | 459 | for f, r in changes: |
|
460 | 460 | newf = self.filemapper(f) |
|
461 | 461 | if newf and (newf != f or newf not in files): |
|
462 | 462 | files[newf] = (f, r) |
|
463 | 463 | if newf != f: |
|
464 | 464 | ncleanp2.discard(f) |
|
465 | 465 | files = sorted(files.items()) |
|
466 | 466 | |
|
467 | 467 | ncopies = {} |
|
468 | 468 | for c in copies: |
|
469 | 469 | newc = self.filemapper(c) |
|
470 | 470 | if newc: |
|
471 | 471 | newsource = self.filemapper(copies[c]) |
|
472 | 472 | if newsource: |
|
473 | 473 | ncopies[newc] = newsource |
|
474 | 474 | |
|
475 | 475 | return files, ncopies, ncleanp2 |
|
476 | 476 | |
|
477 | 477 | def targetfilebelongstosource(self, targetfilename): |
|
478 | 478 | return self.filemapper.istargetfile(targetfilename) |
|
479 | 479 | |
|
480 | 480 | def getfile(self, name, rev): |
|
481 | 481 | realname, realrev = rev |
|
482 | 482 | return self.base.getfile(realname, realrev) |
|
483 | 483 | |
|
484 | 484 | def gettags(self): |
|
485 | 485 | return self.base.gettags() |
|
486 | 486 | |
|
487 | 487 | def hasnativeorder(self): |
|
488 | 488 | return self.base.hasnativeorder() |
|
489 | 489 | |
|
490 | 490 | def lookuprev(self, rev): |
|
491 | 491 | return self.base.lookuprev(rev) |
|
492 | 492 | |
|
493 | 493 | def getbookmarks(self): |
|
494 | 494 | return self.base.getbookmarks() |
|
495 | 495 | |
|
496 | 496 | def converted(self, rev, sinkrev): |
|
497 | 497 | self.base.converted(rev, sinkrev) |
@@ -1,732 +1,732 b'' | |||
|
1 | 1 | # hg.py - hg backend for convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | # Notes for hg->hg conversion: |
|
9 | 9 | # |
|
10 | 10 | # * Old versions of Mercurial didn't trim the whitespace from the ends |
|
11 | 11 | # of commit messages, but new versions do. Changesets created by |
|
12 | 12 | # those older versions, then converted, may thus have different |
|
13 | 13 | # hashes for changesets that are otherwise identical. |
|
14 | 14 | # |
|
15 | 15 | # * Using "--config convert.hg.saverev=true" will cause the source
|
16 | 16 | # identifier to be stored in the converted revision. This will cause |
|
17 | 17 | # the converted revision to have a different identity than the |
|
18 | 18 | # source. |
|
19 | 19 | |
|
20 | 20 | import os |
|
21 | 21 | import re |
|
22 | 22 | import time |
|
23 | 23 | |
|
24 | 24 | from mercurial.i18n import _ |
|
25 | 25 | from mercurial.pycompat import open |
|
26 | 26 | from mercurial.node import ( |
|
27 | 27 | bin, |
|
28 | 28 | hex, |
|
29 | 29 | sha1nodeconstants, |
|
30 | 30 | ) |
|
31 | 31 | from mercurial import ( |
|
32 | 32 | bookmarks, |
|
33 | 33 | context, |
|
34 | 34 | error, |
|
35 | 35 | exchange, |
|
36 | 36 | hg, |
|
37 | 37 | lock as lockmod, |
|
38 | 38 | logcmdutil, |
|
39 | 39 | merge as mergemod, |
|
40 | 40 | mergestate, |
|
41 | 41 | phases, |
|
42 | 42 | pycompat, |
|
43 | 43 | util, |
|
44 | 44 | ) |
|
45 | 45 | from mercurial.utils import dateutil |
|
46 | 46 | |
|
47 | 47 | stringio = util.stringio |
|
48 | 48 | |
|
49 | 49 | from . import common |
|
50 | 50 | |
|
51 | 51 | mapfile = common.mapfile |
|
52 | 52 | NoRepo = common.NoRepo |
|
53 | 53 | |
|
54 | 54 | sha1re = re.compile(br'\b[0-9a-f]{12,40}\b') |
|
55 | 55 | |
|
56 | 56 | |
|
57 | 57 | class mercurial_sink(common.converter_sink): |
|
58 | 58 | def __init__(self, ui, repotype, path): |
|
59 | 59 | common.converter_sink.__init__(self, ui, repotype, path) |
|
60 | 60 | self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames') |
|
61 | 61 | self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches') |
|
62 | 62 | self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch') |
|
63 | 63 | self.lastbranch = None |
|
64 | 64 | if os.path.isdir(path) and len(os.listdir(path)) > 0: |
|
65 | 65 | try: |
|
66 | 66 | self.repo = hg.repository(self.ui, path) |
|
67 | 67 | if not self.repo.local(): |
|
68 | 68 | raise NoRepo( |
|
69 | 69 | _(b'%s is not a local Mercurial repository') % path |
|
70 | 70 | ) |
|
71 | 71 | except error.RepoError as err: |
|
72 | 72 | ui.traceback() |
|
73 | 73 | raise NoRepo(err.args[0]) |
|
74 | 74 | else: |
|
75 | 75 | try: |
|
76 | 76 | ui.status(_(b'initializing destination %s repository\n') % path) |
|
77 | 77 | self.repo = hg.repository(self.ui, path, create=True) |
|
78 | 78 | if not self.repo.local(): |
|
79 | 79 | raise NoRepo( |
|
80 | 80 | _(b'%s is not a local Mercurial repository') % path |
|
81 | 81 | ) |
|
82 | 82 | self.created.append(path) |
|
83 | 83 | except error.RepoError: |
|
84 | 84 | ui.traceback() |
|
85 | 85 | raise NoRepo( |
|
86 | 86 | _(b"could not create hg repository %s as sink") % path |
|
87 | 87 | ) |
|
88 | 88 | self.lock = None |
|
89 | 89 | self.wlock = None |
|
90 | 90 | self.filemapmode = False |
|
91 | 91 | self.subrevmaps = {} |
|
92 | 92 | |
|
93 | 93 | def before(self): |
|
94 | 94 | self.ui.debug(b'run hg sink pre-conversion action\n') |
|
95 | 95 | self.wlock = self.repo.wlock() |
|
96 | 96 | self.lock = self.repo.lock() |
|
97 | 97 | |
|
98 | 98 | def after(self): |
|
99 | 99 | self.ui.debug(b'run hg sink post-conversion action\n') |
|
100 | 100 | if self.lock: |
|
101 | 101 | self.lock.release() |
|
102 | 102 | if self.wlock: |
|
103 | 103 | self.wlock.release() |
|
104 | 104 | |
|
105 | 105 | def revmapfile(self): |
|
106 | 106 | return self.repo.vfs.join(b"shamap") |
|
107 | 107 | |
|
108 | 108 | def authorfile(self): |
|
109 | 109 | return self.repo.vfs.join(b"authormap") |
|
110 | 110 | |
|
111 | 111 | def setbranch(self, branch, pbranches): |
|
112 | 112 | if not self.clonebranches: |
|
113 | 113 | return |
|
114 | 114 | |
|
115 | 115 | setbranch = branch != self.lastbranch |
|
116 | 116 | self.lastbranch = branch |
|
117 | 117 | if not branch: |
|
118 | 118 | branch = b'default' |
|
119 | 119 | pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches] |
|
120 | 120 | |
|
121 | 121 | branchpath = os.path.join(self.path, branch) |
|
122 | 122 | if setbranch: |
|
123 | 123 | self.after() |
|
124 | 124 | try: |
|
125 | 125 | self.repo = hg.repository(self.ui, branchpath) |
|
126 | 126 | except Exception: |
|
127 | 127 | self.repo = hg.repository(self.ui, branchpath, create=True) |
|
128 | 128 | self.before() |
|
129 | 129 | |
|
130 | 130 | # pbranches may bring revisions from other branches (merge parents) |
|
131 | 131 | # Make sure we have them, or pull them. |
|
132 | 132 | missings = {} |
|
133 | 133 | for b in pbranches: |
|
134 | 134 | try: |
|
135 | 135 | self.repo.lookup(b[0]) |
|
136 | 136 | except Exception: |
|
137 | 137 | missings.setdefault(b[1], []).append(b[0]) |
|
138 | 138 | |
|
139 | 139 | if missings: |
|
140 | 140 | self.after() |
|
141 | 141 | for pbranch, heads in sorted(missings.items()):
|
142 | 142 | pbranchpath = os.path.join(self.path, pbranch) |
|
143 | 143 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
144 | 144 | self.ui.note( |
|
145 | 145 | _(b'pulling from %s into %s\n') % (pbranch, branch) |
|
146 | 146 | ) |
|
147 | 147 | exchange.pull( |
|
148 | 148 | self.repo, prepo, heads=[prepo.lookup(h) for h in heads] |
|
149 | 149 | ) |
|
150 | 150 | self.before() |
|
151 | 151 | |
|
152 | 152 | def _rewritetags(self, source, revmap, data): |
|
153 | 153 | fp = stringio() |
|
154 | 154 | for line in data.splitlines(): |
|
155 | 155 | s = line.split(b' ', 1) |
|
156 | 156 | if len(s) != 2: |
|
157 | 157 | self.ui.warn(_(b'invalid tag entry: "%s"\n') % line) |
|
158 | 158 | fp.write(b'%s\n' % line) # Bogus, but keep for hash stability |
|
159 | 159 | continue |
|
160 | 160 | revid = revmap.get(source.lookuprev(s[0])) |
|
161 | 161 | if not revid: |
|
162 | 162 | if s[0] == sha1nodeconstants.nullhex: |
|
163 | 163 | revid = s[0] |
|
164 | 164 | else: |
|
165 | 165 | # missing, but keep for hash stability |
|
166 | 166 | self.ui.warn(_(b'missing tag entry: "%s"\n') % line) |
|
167 | 167 | fp.write(b'%s\n' % line) |
|
168 | 168 | continue |
|
169 | 169 | fp.write(b'%s %s\n' % (revid, s[1])) |
|
170 | 170 | return fp.getvalue() |
|
171 | 171 | |
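The rewriting above maps each `node tag` line of `.hgtags` through the revmap and keeps unresolvable lines verbatim, so the file content (and thus the converted hash) stays stable across re-runs. A toy illustration with made-up nodes:

```python
# Hypothetical revmap and .hgtags content; only the node that resolves
# through the map gets rewritten, the other line is kept as-is.
revmap = {b'0' * 40: b'f' * 40}
data = (b'0' * 40 + b' v1.0\n'
        + b'deadbeef' * 5 + b' orphan-tag\n')

out = []
for line in data.splitlines():
    node, _, tag = line.partition(b' ')
    out.append((revmap.get(node) or node) + b' ' + tag)
print(b'\n'.join(out).decode())
# ffff...ffff v1.0
# deadbeef...deadbeef orphan-tag
```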
|
172 | 172 | def _rewritesubstate(self, source, data): |
|
173 | 173 | fp = stringio() |
|
174 | 174 | for line in data.splitlines(): |
|
175 | 175 | s = line.split(b' ', 1) |
|
176 | 176 | if len(s) != 2: |
|
177 | 177 | continue |
|
178 | 178 | |
|
179 | 179 | revid = s[0] |
|
180 | 180 | subpath = s[1] |
|
181 | 181 | if revid != sha1nodeconstants.nullhex: |
|
182 | 182 | revmap = self.subrevmaps.get(subpath) |
|
183 | 183 | if revmap is None: |
|
184 | 184 | revmap = mapfile( |
|
185 | 185 | self.ui, self.repo.wjoin(subpath, b'.hg/shamap') |
|
186 | 186 | ) |
|
187 | 187 | self.subrevmaps[subpath] = revmap |
|
188 | 188 | |
|
189 | 189 | # It is reasonable that one or more of the subrepos don't |
|
190 | 190 | # need to be converted, in which case they can be cloned |
|
191 | 191 | # into place instead of converted. Therefore, only warn |
|
192 | 192 | # once. |
|
193 | 193 | msg = _(b'no ".hgsubstate" updates will be made for "%s"\n') |
|
194 | 194 | if len(revmap) == 0: |
|
195 | 195 | sub = self.repo.wvfs.reljoin(subpath, b'.hg') |
|
196 | 196 | |
|
197 | 197 | if self.repo.wvfs.exists(sub): |
|
198 | 198 | self.ui.warn(msg % subpath) |
|
199 | 199 | |
|
200 | 200 | newid = revmap.get(revid) |
|
201 | 201 | if not newid: |
|
202 | 202 | if len(revmap) > 0: |
|
203 | 203 | self.ui.warn( |
|
204 | 204 | _(b"%s is missing from %s/.hg/shamap\n") |
|
205 | 205 | % (revid, subpath) |
|
206 | 206 | ) |
|
207 | 207 | else: |
|
208 | 208 | revid = newid |
|
209 | 209 | |
|
210 | 210 | fp.write(b'%s %s\n' % (revid, subpath)) |
|
211 | 211 | |
|
212 | 212 | return fp.getvalue() |
|
213 | 213 | |
|
214 | 214 | def _calculatemergedfiles(self, source, p1ctx, p2ctx): |
|
215 | 215 | """Calculates the files from p2 that we need to pull in when merging p1 |
|
216 | 216 | and p2, given that the merge is coming from the given source. |
|
217 | 217 | |
|
218 | 218 | This prevents us from losing files that only exist in the target p2 and |
|
219 | 219 | that don't come from the source repo (like if you're merging multiple |
|
220 | 220 | repositories together). |
|
221 | 221 | """ |
|
222 | 222 | anc = [p1ctx.ancestor(p2ctx)] |
|
223 | 223 | # Calculate what files are coming from p2 |
|
224 | 224 | # TODO: mresult.commitinfo might be able to get that info |
|
225 | 225 | mresult = mergemod.calculateupdates( |
|
226 | 226 | self.repo, |
|
227 | 227 | p1ctx, |
|
228 | 228 | p2ctx, |
|
229 | 229 | anc, |
|
230 | 230 | branchmerge=True, |
|
231 | 231 | force=True, |
|
232 | 232 | acceptremote=False, |
|
233 | 233 | followcopies=False, |
|
234 | 234 | ) |
|
235 | 235 | |
|
236 | 236 | for file, (action, info, msg) in mresult.filemap(): |
|
237 | 237 | if source.targetfilebelongstosource(file): |
|
238 | 238 | # If the file belongs to the source repo, ignore the p2 |
|
239 | 239 | # since it will be covered by the existing fileset. |
|
240 | 240 | continue |
|
241 | 241 | |
|
242 | 242 | # If the file requires actual merging, abort. We don't have enough |
|
243 | 243 | # context to resolve merges correctly. |
|
244 | 244 | if action in mergestate.CONVERT_MERGE_ACTIONS: |
|
245 | 245 | raise error.Abort( |
|
246 | 246 | _( |
|
247 | 247 | b"unable to convert merge commit " |
|
248 | 248 | b"since target parents do not merge cleanly (file " |
|
249 | 249 | b"%s, parents %s and %s)" |
|
250 | 250 | ) |
|
251 | 251 | % (file, p1ctx, p2ctx) |
|
252 | 252 | ) |
|
253 | 253 | elif action == mergestate.ACTION_KEEP: |
|
254 | 254 | # 'keep' means nothing changed from p1 |
|
255 | 255 | continue |
|
256 | 256 | else: |
|
257 | 257 | # Any other change means we want to take the p2 version |
|
258 | 258 | yield file |
|
259 | 259 | |
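The generator above keeps only those p2 files that neither belong to the source fileset, nor require a real merge, nor are unchanged from p1. The same filtering with the merge result stubbed out as plain tuples (the action codes below are stand-ins for the `mergestate` constants):

```python
MERGE_ACTIONS = {'m'}   # stand-in for mergestate.CONVERT_MERGE_ACTIONS
ACTION_KEEP = 'k'       # stand-in for mergestate.ACTION_KEEP

def p2files(filemap, belongstosource):
    for f, action in filemap:
        if belongstosource(f):
            continue            # already covered by the source fileset
        if action in MERGE_ACTIONS:
            raise RuntimeError('real merge needed for %s' % f)
        if action == ACTION_KEEP:
            continue            # nothing changed from p1
        yield f                 # take the p2 version

fm = [('a.txt', 'g'), ('b.txt', 'k'), ('c.txt', 'g')]
print(list(p2files(fm, lambda f: f == 'a.txt')))  # ['c.txt']
```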
|
260 | 260 | def putcommit( |
|
261 | 261 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
262 | 262 | ): |
|
263 | 263 | files = dict(files) |
|
264 | 264 | |
|
265 | 265 | def getfilectx(repo, memctx, f): |
|
266 | 266 | if p2ctx and f in p2files and f not in copies: |
|
267 | 267 | self.ui.debug(b'reusing %s from p2\n' % f) |
|
268 | 268 | try: |
|
269 | 269 | return p2ctx[f] |
|
270 | 270 | except error.ManifestLookupError: |
|
271 | 271 | # If the file doesn't exist in p2, then we're syncing a |
|
272 | 272 | # delete, so just return None. |
|
273 | 273 | return None |
|
274 | 274 | try: |
|
275 | 275 | v = files[f] |
|
276 | 276 | except KeyError: |
|
277 | 277 | return None |
|
278 | 278 | data, mode = source.getfile(f, v) |
|
279 | 279 | if data is None: |
|
280 | 280 | return None |
|
281 | 281 | if f == b'.hgtags': |
|
282 | 282 | data = self._rewritetags(source, revmap, data) |
|
283 | 283 | if f == b'.hgsubstate': |
|
284 | 284 | data = self._rewritesubstate(source, data) |
|
285 | 285 | return context.memfilectx( |
|
286 | 286 | self.repo, |
|
287 | 287 | memctx, |
|
288 | 288 | f, |
|
289 | 289 | data, |
|
290 | 290 | b'l' in mode, |
|
291 | 291 | b'x' in mode, |
|
292 | 292 | copies.get(f), |
|
293 | 293 | ) |
|
294 | 294 | |
|
295 | 295 | pl = [] |
|
296 | 296 | for p in parents: |
|
297 | 297 | if p not in pl: |
|
298 | 298 | pl.append(p) |
|
299 | 299 | parents = pl |
|
300 | 300 | nparents = len(parents) |
|
301 | 301 | if self.filemapmode and nparents == 1: |
|
302 | 302 | m1node = self.repo.changelog.read(bin(parents[0]))[0] |
|
303 | 303 | parent = parents[0] |
|
304 | 304 | |
|
305 | 305 | if len(parents) < 2: |
|
306 | 306 | parents.append(self.repo.nullid) |
|
307 | 307 | if len(parents) < 2: |
|
308 | 308 | parents.append(self.repo.nullid) |
|
309 | 309 | p2 = parents.pop(0) |
|
310 | 310 | |
|
311 | 311 | text = commit.desc |
|
312 | 312 | |
|
313 | 313 | sha1s = re.findall(sha1re, text) |
|
314 | 314 | for sha1 in sha1s: |
|
315 | 315 | oldrev = source.lookuprev(sha1) |
|
316 | 316 | newrev = revmap.get(oldrev) |
|
317 | 317 | if newrev is not None: |
|
318 | 318 | text = text.replace(sha1, newrev[: len(sha1)]) |
|
319 | 319 | |
|
320 | 320 | extra = commit.extra.copy() |
|
321 | 321 | |
|
322 | 322 | sourcename = self.repo.ui.config(b'convert', b'hg.sourcename') |
|
323 | 323 | if sourcename: |
|
324 | 324 | extra[b'convert_source'] = sourcename |
|
325 | 325 | |
|
326 | 326 | for label in ( |
|
327 | 327 | b'source', |
|
328 | 328 | b'transplant_source', |
|
329 | 329 | b'rebase_source', |
|
330 | 330 | b'intermediate-source', |
|
331 | 331 | ): |
|
332 | 332 | node = extra.get(label) |
|
333 | 333 | |
|
334 | 334 | if node is None: |
|
335 | 335 | continue |
|
336 | 336 | |
|
337 | 337 | # Only transplant stores its reference in binary |
|
338 | 338 | if label == b'transplant_source': |
|
339 | 339 | node = hex(node) |
|
340 | 340 | |
|
341 | 341 | newrev = revmap.get(node) |
|
342 | 342 | if newrev is not None: |
|
343 | 343 | if label == b'transplant_source': |
|
344 | 344 | newrev = bin(newrev) |
|
345 | 345 | |
|
346 | 346 | extra[label] = newrev |
|
347 | 347 | |
|
348 | 348 | if self.branchnames and commit.branch: |
|
349 | 349 | extra[b'branch'] = commit.branch |
|
350 | 350 | if commit.rev and commit.saverev: |
|
351 | 351 | extra[b'convert_revision'] = commit.rev |
|
352 | 352 | |
|
353 | 353 | while parents: |
|
354 | 354 | p1 = p2 |
|
355 | 355 | p2 = parents.pop(0) |
|
356 | 356 | p1ctx = self.repo[p1] |
|
357 | 357 | p2ctx = None |
|
358 | 358 | if p2 != self.repo.nullid: |
|
359 | 359 | p2ctx = self.repo[p2] |
|
360 | 360 | fileset = set(files) |
|
361 | 361 | if full: |
|
362 | 362 | fileset.update(self.repo[p1]) |
|
363 | 363 | fileset.update(self.repo[p2]) |
|
364 | 364 | |
|
365 | 365 | if p2ctx: |
|
366 | 366 | p2files = set(cleanp2) |
|
367 | 367 | for file in self._calculatemergedfiles(source, p1ctx, p2ctx): |
|
368 | 368 | p2files.add(file) |
|
369 | 369 | fileset.add(file) |
|
370 | 370 | |
|
371 | 371 | ctx = context.memctx( |
|
372 | 372 | self.repo, |
|
373 | 373 | (p1, p2), |
|
374 | 374 | text, |
|
375 | 375 | fileset, |
|
376 | 376 | getfilectx, |
|
377 | 377 | commit.author, |
|
378 | 378 | commit.date, |
|
379 | 379 | extra, |
|
380 | 380 | ) |
|
381 | 381 | |
|
382 | 382 | # We won't know if the conversion changes the node until after the |
|
383 | 383 | # commit, so copy the source's phase for now. |
|
384 | 384 | self.repo.ui.setconfig( |
|
385 | 385 | b'phases', |
|
386 | 386 | b'new-commit', |
|
387 | 387 | phases.phasenames[commit.phase], |
|
388 | 388 | b'convert', |
|
389 | 389 | ) |
|
390 | 390 | |
|
391 | 391 | with self.repo.transaction(b"convert") as tr: |
|
392 | 392 | if self.repo.ui.config(b'convert', b'hg.preserve-hash'): |
|
393 | 393 | origctx = commit.ctx |
|
394 | 394 | else: |
|
395 | 395 | origctx = None |
|
396 | 396 | node = hex(self.repo.commitctx(ctx, origctx=origctx)) |
|
397 | 397 | |
|
398 | 398 | # If the node value has changed, but the phase is lower than |
|
399 | 399 | # draft, set it back to draft since it hasn't been exposed |
|
400 | 400 | # anywhere. |
|
401 | 401 | if commit.rev != node: |
|
402 | 402 | ctx = self.repo[node] |
|
403 | 403 | if ctx.phase() < phases.draft: |
|
404 | 404 | phases.registernew( |
|
405 | 405 | self.repo, tr, phases.draft, [ctx.rev()] |
|
406 | 406 | ) |
|
407 | 407 | |
|
408 | 408 | text = b"(octopus merge fixup)\n" |
|
409 | 409 | p2 = node |
|
410 | 410 | |
|
411 | 411 | if self.filemapmode and nparents == 1: |
|
412 | 412 | man = self.repo.manifestlog.getstorage(b'') |
|
413 | 413 | mnode = self.repo.changelog.read(bin(p2))[0] |
|
414 | 414 | closed = b'close' in commit.extra |
|
415 | 415 | if not closed and not man.cmp(m1node, man.revision(mnode)): |
|
416 | 416 | self.ui.status(_(b"filtering out empty revision\n")) |
|
417 | 417 | self.repo.rollback(force=True) |
|
418 | 418 | return parent |
|
419 | 419 | return p2 |
|
420 | 420 | |
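Mercurial changesets have at most two parents, so the `while parents:` loop in `putcommit()` folds an N-parent (octopus) source merge into a chain of two-parent commits, labeling the intermediate ones `(octopus merge fixup)`. A sketch of that reduction with the actual committing stubbed out:

```python
NULLID = b'\0' * 20  # stand-in for repo.nullid

def reduce_octopus(parents, commitpair):
    """Fold an N-parent commit into a chain of 2-parent commits."""
    parents = list(parents)
    while len(parents) < 2:
        parents.append(NULLID)
    p2 = parents.pop(0)
    while parents:
        p1, p2 = p2, parents.pop(0)
        p2 = commitpair(p1, p2)   # node of the new two-parent commit
    return p2

made = []
def commitpair(p1, p2):
    made.append((p1, p2))
    return b'merge(%s,%s)' % (p1, p2)

reduce_octopus([b'a', b'b', b'c'], commitpair)
print(made)   # [(b'a', b'b'), (b'merge(a,b)', b'c')]
```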
|
421 | 421 | def puttags(self, tags): |
|
422 | 422 | tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True) |
|
423 | 423 | tagparent = tagparent or self.repo.nullid |
|
424 | 424 | |
|
425 | 425 | oldlines = set() |
|
426 | 426 | for branch, heads in pycompat.iteritems(self.repo.branchmap()): |
|
427 | 427 | for h in heads: |
|
428 | 428 | if b'.hgtags' in self.repo[h]: |
|
429 | 429 | oldlines.update( |
|
430 | 430 | set(self.repo[h][b'.hgtags'].data().splitlines(True)) |
|
431 | 431 | ) |
|
432 | 432 | oldlines = sorted(list(oldlines)) |
|
433 | 433 | |
|
434 | 434 | newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags]) |
|
435 | 435 | if newlines == oldlines: |
|
436 | 436 | return None, None |
|
437 | 437 | |
|
438 | 438 | # if the old and new tags match, then there is nothing to update |
|
439 | 439 | oldtags = set() |
|
440 | 440 | newtags = set() |
|
441 | 441 | for line in oldlines: |
|
442 | 442 | s = line.strip().split(b' ', 1) |
|
443 | 443 | if len(s) != 2: |
|
444 | 444 | continue |
|
445 | 445 | oldtags.add(s[1]) |
|
446 | 446 | for line in newlines: |
|
447 | 447 | s = line.strip().split(b' ', 1) |
|
448 | 448 | if len(s) != 2: |
|
449 | 449 | continue |
|
450 | 450 | if s[1] not in oldtags: |
|
451 | 451 | newtags.add(s[1].strip()) |
|
452 | 452 | |
|
453 | 453 | if not newtags: |
|
454 | 454 | return None, None |
|
455 | 455 | |
|
456 | 456 | data = b"".join(newlines) |
|
457 | 457 | |
|
458 | 458 | def getfilectx(repo, memctx, f): |
|
459 | 459 | return context.memfilectx(repo, memctx, f, data, False, False, None) |
|
460 | 460 | |
|
461 | 461 | self.ui.status(_(b"updating tags\n")) |
|
462 | 462 | date = b"%d 0" % int(time.mktime(time.gmtime())) |
|
463 | 463 | extra = {b'branch': self.tagsbranch} |
|
464 | 464 | ctx = context.memctx( |
|
465 | 465 | self.repo, |
|
466 | 466 | (tagparent, None), |
|
467 | 467 | b"update tags", |
|
468 | 468 | [b".hgtags"], |
|
469 | 469 | getfilectx, |
|
470 | 470 | b"convert-repo", |
|
471 | 471 | date, |
|
472 | 472 | extra, |
|
473 | 473 | ) |
|
474 | 474 | node = self.repo.commitctx(ctx) |
|
475 | 475 | return hex(node), hex(tagparent) |
|
476 | 476 | |
|
477 | 477 | def setfilemapmode(self, active): |
|
478 | 478 | self.filemapmode = active |
|
479 | 479 | |
|
480 | 480 | def putbookmarks(self, updatedbookmark): |
|
481 | 481 | if not len(updatedbookmark): |
|
482 | 482 | return |
|
483 | 483 | wlock = lock = tr = None |
|
484 | 484 | try: |
|
485 | 485 | wlock = self.repo.wlock() |
|
486 | 486 | lock = self.repo.lock() |
|
487 | 487 | tr = self.repo.transaction(b'bookmark') |
|
488 | 488 | self.ui.status(_(b"updating bookmarks\n")) |
|
489 | 489 | destmarks = self.repo._bookmarks |
|
490 | 490 | changes = [ |
|
491 | 491 | (bookmark, bin(updatedbookmark[bookmark])) |
|
492 | 492 | for bookmark in updatedbookmark |
|
493 | 493 | ] |
|
494 | 494 | destmarks.applychanges(self.repo, tr, changes) |
|
495 | 495 | tr.close() |
|
496 | 496 | finally: |
|
497 | 497 | lockmod.release(lock, wlock, tr) |
|
498 | 498 | |
|
499 | 499 | def hascommitfrommap(self, rev): |
|
500 | 500 | # the exact semantics of clonebranches are unclear, so we can't say no
|
501 | 501 | return rev in self.repo or self.clonebranches |
|
502 | 502 | |
|
503 | 503 | def hascommitforsplicemap(self, rev): |
|
504 | 504 | if rev not in self.repo and self.clonebranches: |
|
505 | 505 | raise error.Abort( |
|
506 | 506 | _( |
|
507 | 507 | b'revision %s not found in destination ' |
|
508 | 508 | b'repository (lookups with clonebranches=true ' |
|
509 | 509 | b'are not implemented)' |
|
510 | 510 | ) |
|
511 | 511 | % rev |
|
512 | 512 | ) |
|
513 | 513 | return rev in self.repo |
|
514 | 514 | |
|
515 | 515 | |
|
516 | 516 | class mercurial_source(common.converter_source): |
|
517 | 517 | def __init__(self, ui, repotype, path, revs=None): |
|
518 | 518 | common.converter_source.__init__(self, ui, repotype, path, revs) |
|
519 | 519 | self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors') |
|
520 | 520 | self.ignored = set() |
|
521 | 521 | self.saverev = ui.configbool(b'convert', b'hg.saverev') |
|
522 | 522 | try: |
|
523 | 523 | self.repo = hg.repository(self.ui, path) |
|
524 | 524 | # try to provoke an exception if this isn't really a hg |
|
525 | 525 | # repo, but some other bogus compatible-looking url |
|
526 | 526 | if not self.repo.local(): |
|
527 | 527 | raise error.RepoError |
|
528 | 528 | except error.RepoError: |
|
529 | 529 | ui.traceback() |
|
530 | 530 | raise NoRepo(_(b"%s is not a local Mercurial repository") % path) |
|
531 | 531 | self.lastrev = None |
|
532 | 532 | self.lastctx = None |
|
533 | 533 | self._changescache = None, None |
|
534 | 534 | self.convertfp = None |
|
535 | 535 | # Restrict converted revisions to startrev descendants |
|
536 | 536 | startnode = ui.config(b'convert', b'hg.startrev') |
|
537 | 537 | hgrevs = ui.config(b'convert', b'hg.revs') |
|
538 | 538 | if hgrevs is None: |
|
539 | 539 | if startnode is not None: |
|
540 | 540 | try: |
|
541 | 541 | startnode = self.repo.lookup(startnode) |
|
542 | 542 | except error.RepoError: |
|
543 | 543 | raise error.Abort( |
|
544 | 544 | _(b'%s is not a valid start revision') % startnode |
|
545 | 545 | ) |
|
546 | 546 | startrev = self.repo.changelog.rev(startnode) |
|
547 | 547 | children = {startnode: 1} |
|
548 | 548 | for r in self.repo.changelog.descendants([startrev]): |
|
549 | 549 | children[self.repo.changelog.node(r)] = 1 |
|
550 | 550 | self.keep = children.__contains__ |
|
551 | 551 | else: |
|
552 | 552 | self.keep = util.always |
|
553 | 553 | if revs: |
|
554 | 554 | self._heads = [self.repo.lookup(r) for r in revs] |
|
555 | 555 | else: |
|
556 | 556 | self._heads = self.repo.heads() |
|
557 | 557 | else: |
|
558 | 558 | if revs or startnode is not None: |
|
559 | 559 | raise error.Abort( |
|
560 | 560 | _( |
|
561 | 561 | b'hg.revs cannot be combined with ' |
|
562 | 562 | b'hg.startrev or --rev' |
|
563 | 563 | ) |
|
564 | 564 | ) |
|
565 | 565 | nodes = set() |
|
566 | 566 | parents = set() |
|
567 | 567 | for r in logcmdutil.revrange(self.repo, [hgrevs]): |
|
568 | 568 | ctx = self.repo[r] |
|
569 | 569 | nodes.add(ctx.node()) |
|
570 | 570 | parents.update(p.node() for p in ctx.parents()) |
|
571 | 571 | self.keep = nodes.__contains__ |
|
572 | 572 | self._heads = nodes - parents |
|
573 | 573 | |
|
574 | 574 | def _changectx(self, rev): |
|
575 | 575 | if self.lastrev != rev: |
|
576 | 576 | self.lastctx = self.repo[rev] |
|
577 | 577 | self.lastrev = rev |
|
578 | 578 | return self.lastctx |
|
579 | 579 | |
|
580 | 580 | def _parents(self, ctx): |
|
581 | 581 | return [p for p in ctx.parents() if p and self.keep(p.node())] |
|
582 | 582 | |
|
583 | 583 | def getheads(self): |
|
584 | 584 | return [hex(h) for h in self._heads if self.keep(h)] |
|
585 | 585 | |
|
586 | 586 | def getfile(self, name, rev): |
|
587 | 587 | try: |
|
588 | 588 | fctx = self._changectx(rev)[name] |
|
589 | 589 | return fctx.data(), fctx.flags() |
|
590 | 590 | except error.LookupError: |
|
591 | 591 | return None, None |
|
592 | 592 | |
|
593 | 593 | def _changedfiles(self, ctx1, ctx2): |
|
594 | 594 | ma, r = [], [] |
|
595 | 595 | maappend = ma.append |
|
596 | 596 | rappend = r.append |
|
597 | 597 | d = ctx1.manifest().diff(ctx2.manifest()) |
|
598 | 598 | for f, ((node1, flag1), (node2, flag2)) in d.items():
|
599 | 599 | if node2 is None: |
|
600 | 600 | rappend(f) |
|
601 | 601 | else: |
|
602 | 602 | maappend(f) |
|
603 | 603 | return ma, r |
|
604 | 604 | |
|
605 | 605 | def getchanges(self, rev, full): |
|
606 | 606 | ctx = self._changectx(rev) |
|
607 | 607 | parents = self._parents(ctx) |
|
608 | 608 | if full or not parents: |
|
609 | 609 | files = copyfiles = ctx.manifest() |
|
610 | 610 | if parents: |
|
611 | 611 | if self._changescache[0] == rev: |
|
612 | 612 | ma, r = self._changescache[1] |
|
613 | 613 | else: |
|
614 | 614 | ma, r = self._changedfiles(parents[0], ctx) |
|
615 | 615 | if not full: |
|
616 | 616 | files = ma + r |
|
617 | 617 | copyfiles = ma |
|
618 | 618 | # _getcopies() is also run for roots and before filtering so missing |
|
619 | 619 | # revlogs are detected early |
|
620 | 620 | copies = self._getcopies(ctx, parents, copyfiles) |
|
621 | 621 | cleanp2 = set() |
|
622 | 622 | if len(parents) == 2: |
|
623 | 623 | d = parents[1].manifest().diff(ctx.manifest(), clean=True) |
|
624 | 624 | for f, value in d.items():
|
625 | 625 | if value is None: |
|
626 | 626 | cleanp2.add(f) |
|
627 | 627 | changes = [(f, rev) for f in files if f not in self.ignored] |
|
628 | 628 | changes.sort() |
|
629 | 629 | return changes, copies, cleanp2 |
|
630 | 630 | |
|
631 | 631 | def _getcopies(self, ctx, parents, files): |
|
632 | 632 | copies = {} |
|
633 | 633 | for name in files: |
|
634 | 634 | if name in self.ignored: |
|
635 | 635 | continue |
|
636 | 636 | try: |
|
637 | 637 | copysource = ctx.filectx(name).copysource() |
|
638 | 638 | if copysource in self.ignored: |
|
639 | 639 | continue |
|
640 | 640 | # Ignore copy sources not in parent revisions |
|
641 | 641 | if not any(copysource in p for p in parents): |
|
642 | 642 | continue |
|
643 | 643 | copies[name] = copysource |
|
644 | 644 | except TypeError: |
|
645 | 645 | pass |
|
646 | 646 | except error.LookupError as e: |
|
647 | 647 | if not self.ignoreerrors: |
|
648 | 648 | raise |
|
649 | 649 | self.ignored.add(name) |
|
650 | 650 | self.ui.warn(_(b'ignoring: %s\n') % e) |
|
651 | 651 | return copies |
|
652 | 652 | |
|
653 | 653 | def getcommit(self, rev): |
|
654 | 654 | ctx = self._changectx(rev) |
|
655 | 655 | _parents = self._parents(ctx) |
|
656 | 656 | parents = [p.hex() for p in _parents] |
|
657 | 657 | optparents = [p.hex() for p in ctx.parents() if p and p not in _parents] |
|
658 | 658 | crev = rev |
|
659 | 659 | |
|
660 | 660 | return common.commit( |
|
661 | 661 | author=ctx.user(), |
|
662 | 662 | date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'), |
|
663 | 663 | desc=ctx.description(), |
|
664 | 664 | rev=crev, |
|
665 | 665 | parents=parents, |
|
666 | 666 | optparents=optparents, |
|
667 | 667 | branch=ctx.branch(), |
|
668 | 668 | extra=ctx.extra(), |
|
669 | 669 | sortkey=ctx.rev(), |
|
670 | 670 | saverev=self.saverev, |
|
671 | 671 | phase=ctx.phase(), |
|
672 | 672 | ctx=ctx, |
|
673 | 673 | ) |
|
674 | 674 | |
|
675 | 675 | def numcommits(self): |
|
676 | 676 | return len(self.repo) |
|
677 | 677 | |
|
678 | 678 | def gettags(self): |
|
679 | 679 | # This will get written to .hgtags, filter non global tags out. |
|
680 | 680 | tags = [ |
|
681 | 681 | t |
|
682 | 682 | for t in self.repo.tagslist() |
|
683 | 683 | if self.repo.tagtype(t[0]) == b'global' |
|
684 | 684 | ] |
|
685 | 685 | return {name: hex(node) for name, node in tags if self.keep(node)} |
|
686 | 686 | |
|
687 | 687 | def getchangedfiles(self, rev, i): |
|
688 | 688 | ctx = self._changectx(rev) |
|
689 | 689 | parents = self._parents(ctx) |
|
690 | 690 | if not parents and i is None: |
|
691 | 691 | i = 0 |
|
692 | 692 | ma, r = ctx.manifest().keys(), [] |
|
693 | 693 | else: |
|
694 | 694 | i = i or 0 |
|
695 | 695 | ma, r = self._changedfiles(parents[i], ctx) |
|
696 | 696 | ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)] |
|
697 | 697 | |
|
698 | 698 | if i == 0: |
|
699 | 699 | self._changescache = (rev, (ma, r)) |
|
700 | 700 | |
|
701 | 701 | return ma + r |
|
702 | 702 | |
|
703 | 703 | def converted(self, rev, destrev): |
|
704 | 704 | if self.convertfp is None: |
|
705 | 705 | self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab') |
|
706 | 706 | self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev))) |
|
707 | 707 | self.convertfp.flush() |
|
708 | 708 | |
|
709 | 709 | def before(self): |
|
710 | 710 | self.ui.debug(b'run hg source pre-conversion action\n') |
|
711 | 711 | |
|
712 | 712 | def after(self): |
|
713 | 713 | self.ui.debug(b'run hg source post-conversion action\n') |
|
714 | 714 | |
|
715 | 715 | def hasnativeorder(self): |
|
716 | 716 | return True |
|
717 | 717 | |
|
718 | 718 | def hasnativeclose(self): |
|
719 | 719 | return True |
|
720 | 720 | |
|
721 | 721 | def lookuprev(self, rev): |
|
722 | 722 | try: |
|
723 | 723 | return hex(self.repo.lookup(rev)) |
|
724 | 724 | except (error.RepoError, error.LookupError): |
|
725 | 725 | return None |
|
726 | 726 | |
|
727 | 727 | def getbookmarks(self): |
|
728 | 728 | return bookmarks.listbookmarks(self.repo) |
|
729 | 729 | |
|
730 | 730 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
731 | 731 | """Mercurial, revision string is a 40 byte hex""" |
|
732 | 732 | self.checkhexformat(revstr, mapname) |
@@ -1,410 +1,410 b'' | |||
|
1 | 1 | # monotone.py - monotone support for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and |
|
4 | 4 | # others |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | import os |
|
10 | 10 | import re |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | from mercurial.pycompat import open |
|
14 | 14 | from mercurial import ( |
|
15 | 15 | error, |
|
16 | 16 | pycompat, |
|
17 | 17 | ) |
|
18 | 18 | from mercurial.utils import dateutil |
|
19 | 19 | |
|
20 | 20 | from . import common |
|
21 | 21 | |
|
22 | 22 | |
|
23 | 23 | class monotone_source(common.converter_source, common.commandline): |
|
24 | 24 | def __init__(self, ui, repotype, path=None, revs=None): |
|
25 | 25 | common.converter_source.__init__(self, ui, repotype, path, revs) |
|
26 | 26 | if revs and len(revs) > 1: |
|
27 | 27 | raise error.Abort( |
|
28 | 28 | _( |
|
29 | 29 | b'monotone source does not support specifying ' |
|
30 | 30 | b'multiple revs' |
|
31 | 31 | ) |
|
32 | 32 | ) |
|
33 | 33 | common.commandline.__init__(self, ui, b'mtn') |
|
34 | 34 | |
|
35 | 35 | self.ui = ui |
|
36 | 36 | self.path = path |
|
37 | 37 | self.automatestdio = False |
|
38 | 38 | self.revs = revs |
|
39 | 39 | |
|
40 | 40 | norepo = common.NoRepo( |
|
41 | 41 | _(b"%s does not look like a monotone repository") % path |
|
42 | 42 | ) |
|
43 | 43 | if not os.path.exists(os.path.join(path, b'_MTN')): |
|
44 | 44 | # Could be a monotone repository (SQLite db file) |
|
45 | 45 | try: |
|
46 | 46 | f = open(path, b'rb') |
|
47 | 47 | header = f.read(16) |
|
48 | 48 | f.close() |
|
49 | 49 | except IOError: |
|
50 | 50 | header = b'' |
|
51 | 51 | if header != b'SQLite format 3\x00': |
|
52 | 52 | raise norepo |
|
53 | 53 | |
|
54 | 54 | # regular expressions for parsing monotone output |
|
55 | 55 | space = br'\s*' |
|
56 | 56 | name = br'\s+"((?:\\"|[^"])*)"\s*' |
|
57 | 57 | value = name |
|
58 | 58 | revision = br'\s+\[(\w+)\]\s*' |
|
59 | 59 | lines = br'(?:.|\n)+' |
|
60 | 60 | |
|
61 | 61 | self.dir_re = re.compile(space + b"dir" + name) |
|
62 | 62 | self.file_re = re.compile( |
|
63 | 63 | space + b"file" + name + b"content" + revision |
|
64 | 64 | ) |
|
65 | 65 | self.add_file_re = re.compile( |
|
66 | 66 | space + b"add_file" + name + b"content" + revision |
|
67 | 67 | ) |
|
68 | 68 | self.patch_re = re.compile( |
|
69 | 69 | space + b"patch" + name + b"from" + revision + b"to" + revision |
|
70 | 70 | ) |
|
71 | 71 | self.rename_re = re.compile(space + b"rename" + name + b"to" + name) |
|
72 | 72 | self.delete_re = re.compile(space + b"delete" + name) |
|
73 | 73 | self.tag_re = re.compile(space + b"tag" + name + b"revision" + revision) |
|
74 | 74 | self.cert_re = re.compile( |
|
75 | 75 | lines + space + b"name" + name + b"value" + value |
|
76 | 76 | ) |
|
77 | 77 | |
|
78 | 78 | attr = space + b"file" + lines + space + b"attr" + space |
|
79 | 79 | self.attr_execute_re = re.compile( |
|
80 | 80 | attr + b'"mtn:execute"' + space + b'"true"' |
|
81 | 81 | ) |
|
82 | 82 | |
|
83 | 83 | # cached data |
|
84 | 84 | self.manifest_rev = None |
|
85 | 85 | self.manifest = None |
|
86 | 86 | self.files = None |
|
87 | 87 | self.dirs = None |
|
88 | 88 | |
|
89 | 89 | common.checktool(b'mtn', abort=False) |
|
90 | 90 | |
|
91 | 91 | def mtnrun(self, *args, **kwargs): |
|
92 | 92 | if self.automatestdio: |
|
93 | 93 | return self.mtnrunstdio(*args, **kwargs) |
|
94 | 94 | else: |
|
95 | 95 | return self.mtnrunsingle(*args, **kwargs) |
|
96 | 96 | |
|
97 | 97 | def mtnrunsingle(self, *args, **kwargs): |
|
98 | 98 | kwargs['d'] = self.path |
|
99 | 99 | return self.run0(b'automate', *args, **kwargs) |
|
100 | 100 | |
|
101 | 101 | def mtnrunstdio(self, *args, **kwargs): |
|
102 | 102 | # Prepare the command in automate stdio format |
|
103 | 103 | kwargs = pycompat.byteskwargs(kwargs) |
|
104 | 104 | command = [] |
|
105 | 105 | for k, v in kwargs.items():
|
106 | 106 | command.append(b"%d:%s" % (len(k), k)) |
|
107 | 107 | if v: |
|
108 | 108 | command.append(b"%d:%s" % (len(v), v)) |
|
109 | 109 | if command: |
|
110 | 110 | command.insert(0, b'o') |
|
111 | 111 | command.append(b'e') |
|
112 | 112 | |
|
113 | 113 | command.append(b'l') |
|
114 | 114 | for arg in args: |
|
115 | 115 | command.append(b"%d:%s" % (len(arg), arg)) |
|
116 | 116 | command.append(b'e') |
|
117 | 117 | command = b''.join(command) |
|
118 | 118 | |
|
119 | 119 | self.ui.debug(b"mtn: sending '%s'\n" % command) |
|
120 | 120 | self.mtnwritefp.write(command) |
|
121 | 121 | self.mtnwritefp.flush() |
|
122 | 122 | |
|
123 | 123 | return self.mtnstdioreadcommandoutput(command) |
|
124 | 124 | |
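The request built above uses monotone's "automate stdio" encoding: netstring-style length-prefixed tokens, with the option block wrapped in `o`…`e` and the argument list in `l`…`e`. A standalone encoder sketch (a hypothetical helper, not the extension's API):

```python
def encode_stdio(args, opts):
    out = []
    if opts:
        out.append(b'o')                      # option block
        for k, v in opts.items():
            out.append(b'%d:%s' % (len(k), k))
            if v:
                out.append(b'%d:%s' % (len(v), v))
        out.append(b'e')
    out.append(b'l')                          # argument list
    for a in args:
        out.append(b'%d:%s' % (len(a), a))
    out.append(b'e')
    return b''.join(out)

print(encode_stdio([b'get_file_of', b'foo.c'], {b'r': b'abc123'}))
# b'o1:r6:abc123el11:get_file_of5:foo.ce'
```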
|
125 | 125 | def mtnstdioreadpacket(self): |
|
126 | 126 | read = None |
|
127 | 127 | commandnbr = b'' |
|
128 | 128 | while read != b':': |
|
129 | 129 | read = self.mtnreadfp.read(1) |
|
130 | 130 | if not read: |
|
131 | 131 | raise error.Abort(_(b'bad mtn packet - no end of commandnbr')) |
|
132 | 132 | commandnbr += read |
|
133 | 133 | commandnbr = commandnbr[:-1] |
|
134 | 134 | |
|
135 | 135 | stream = self.mtnreadfp.read(1) |
|
136 | 136 | if stream not in b'mewptl': |
|
137 | 137 | raise error.Abort( |
|
138 | 138 | _(b'bad mtn packet - bad stream type %s') % stream |
|
139 | 139 | ) |
|
140 | 140 | |
|
141 | 141 | read = self.mtnreadfp.read(1) |
|
142 | 142 | if read != b':': |
|
143 | 143 | raise error.Abort(_(b'bad mtn packet - no divider before size')) |
|
144 | 144 | |
|
145 | 145 | read = None |
|
146 | 146 | lengthstr = b'' |
|
147 | 147 | while read != b':': |
|
148 | 148 | read = self.mtnreadfp.read(1) |
|
149 | 149 | if not read: |
|
150 | 150 | raise error.Abort(_(b'bad mtn packet - no end of packet size')) |
|
151 | 151 | lengthstr += read |
|
152 | 152 | try: |
|
153 | 153 | length = pycompat.long(lengthstr[:-1]) |
|
154 | 154 | except TypeError: |
|
155 | 155 | raise error.Abort( |
|
156 | 156 | _(b'bad mtn packet - bad packet size %s') % lengthstr |
|
157 | 157 | ) |
|
158 | 158 | |
|
159 | 159 | read = self.mtnreadfp.read(length) |
|
160 | 160 | if len(read) != length: |
|
161 | 161 | raise error.Abort( |
|
162 | 162 | _( |
|
163 | 163 | b"bad mtn packet - unable to read full packet " |
|
164 | 164 | b"read %s of %s" |
|
165 | 165 | ) |
|
166 | 166 | % (len(read), length) |
|
167 | 167 | ) |
|
168 | 168 | |
|
169 | 169 | return (commandnbr, stream, length, read) |
|
170 | 170 | |
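For the other direction, each response packet read above has the shape `<commandnbr>:<stream>:<size>:<payload>`, where the stream code is one of `m`/`e`/`w`/`p`/`t`/`l`. A compact decoder sketch over an in-memory stream (illustrative only; it uses asserts where the real code raises `error.Abort`):

```python
import io

def read_packet(fp):
    def upto_colon():
        buf = b''
        while True:
            c = fp.read(1)
            if c == b':':
                return buf
            if not c:
                raise EOFError('truncated packet header')
            buf += c
    commandnbr = upto_colon()
    stream = fp.read(1)
    assert stream in b'mewptl', 'bad stream type'
    assert fp.read(1) == b':', 'missing divider'
    length = int(upto_colon())
    payload = fp.read(length)
    assert len(payload) == length, 'short read'
    return commandnbr, stream, payload

fp = io.BytesIO(b'0:m:5:hello0:l:1:0')
print(read_packet(fp))   # (b'0', b'm', b'hello')
print(read_packet(fp))   # (b'0', b'l', b'0')
```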
|
171 | 171 | def mtnstdioreadcommandoutput(self, command): |
|
172 | 172 | retval = [] |
|
173 | 173 | while True: |
|
174 | 174 | commandnbr, stream, length, output = self.mtnstdioreadpacket() |
|
175 | 175 | self.ui.debug( |
|
176 | 176 | b'mtn: read packet %s:%s:%d\n' % (commandnbr, stream, length) |
|
177 | 177 | ) |
|
178 | 178 | |
|
179 | 179 | if stream == b'l': |
|
180 | 180 | # End of command |
|
181 | 181 | if output != b'0': |
|
182 | 182 | raise error.Abort( |
|
183 | 183 | _(b"mtn command '%s' returned %s") % (command, output) |
|
184 | 184 | ) |
|
185 | 185 | break |
|
186 | 186 | elif stream in b'ew': |
|
187 | 187 | # Error, warning output |
|
188 | 188 | self.ui.warn(_(b'%s error:\n') % self.command) |
|
189 | 189 | self.ui.warn(output) |
|
190 | 190 | elif stream == b'p': |
|
191 | 191 | # Progress messages |
|
192 | 192 | self.ui.debug(b'mtn: ' + output) |
|
193 | 193 | elif stream == b'm': |
|
194 | 194 | # Main stream - command output |
|
195 | 195 | retval.append(output) |
|
196 | 196 | |
|
197 | 197 | return b''.join(retval) |
|
198 | 198 | |
|
199 | 199 | def mtnloadmanifest(self, rev): |
|
200 | 200 | if self.manifest_rev == rev: |
|
201 | 201 | return |
|
202 | 202 | self.manifest = self.mtnrun(b"get_manifest_of", rev).split(b"\n\n") |
|
203 | 203 | self.manifest_rev = rev |
|
204 | 204 | self.files = {} |
|
205 | 205 | self.dirs = {} |
|
206 | 206 | |
|
207 | 207 | for e in self.manifest: |
|
208 | 208 | m = self.file_re.match(e) |
|
209 | 209 | if m: |
|
210 | 210 | attr = b"" |
|
211 | 211 | name = m.group(1) |
|
212 | 212 | node = m.group(2) |
|
213 | 213 | if self.attr_execute_re.match(e): |
|
214 | 214 | attr += b"x" |
|
215 | 215 | self.files[name] = (node, attr) |
|
216 | 216 | m = self.dir_re.match(e) |
|
217 | 217 | if m: |
|
218 | 218 | self.dirs[m.group(1)] = True |
|
219 | 219 | |
|
220 | 220 | def mtnisfile(self, name, rev): |
|
221 | 221 | # a non-file could be a directory or a deleted or renamed file |
|
222 | 222 | self.mtnloadmanifest(rev) |
|
223 | 223 | return name in self.files |
|
224 | 224 | |
|
225 | 225 | def mtnisdir(self, name, rev): |
|
226 | 226 | self.mtnloadmanifest(rev) |
|
227 | 227 | return name in self.dirs |
|
228 | 228 | |
|
229 | 229 | def mtngetcerts(self, rev): |
|
230 | 230 | certs = { |
|
231 | 231 | b"author": b"<missing>", |
|
232 | 232 | b"date": b"<missing>", |
|
233 | 233 | b"changelog": b"<missing>", |
|
234 | 234 | b"branch": b"<missing>", |
|
235 | 235 | } |
|
236 | 236 | certlist = self.mtnrun(b"certs", rev) |
|
237 | 237 | # mtn < 0.45: |
|
238 | 238 | # key "test@selenic.com" |
|
239 | 239 | # mtn >= 0.45: |
|
240 | 240 | # key [ff58a7ffb771907c4ff68995eada1c4da068d328] |
|
241 | 241 | certlist = re.split(br'\n\n {6}key ["\[]', certlist) |
|
242 | 242 | for e in certlist: |
|
243 | 243 | m = self.cert_re.match(e) |
|
244 | 244 | if m: |
|
245 | 245 | name, value = m.groups() |
|
246 | 246 | value = value.replace(br'\"', b'"') |
|
247 | 247 | value = value.replace(br'\\', b'\\') |
|
248 | 248 | certs[name] = value |
|
249 | 249 | # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306 |
|
250 | 250 | # and all times are stored in UTC |
|
251 | 251 | certs[b"date"] = certs[b"date"].split(b'.')[0] + b" UTC" |
|
252 | 252 | return certs |
|
253 | 253 | |
|
254 | 254 | # implement the converter_source interface: |
|
255 | 255 | |
|
256 | 256 | def getheads(self): |
|
257 | 257 | if not self.revs: |
|
258 | 258 | return self.mtnrun(b"leaves").splitlines() |
|
259 | 259 | else: |
|
260 | 260 | return self.revs |
|
261 | 261 | |
|
262 | 262 | def getchanges(self, rev, full): |
|
263 | 263 | if full: |
|
264 | 264 | raise error.Abort( |
|
265 | 265 | _(b"convert from monotone does not support --full") |
|
266 | 266 | ) |
|
267 | 267 | revision = self.mtnrun(b"get_revision", rev).split(b"\n\n") |
|
268 | 268 | files = {} |
|
269 | 269 | ignoremove = {} |
|
270 | 270 | renameddirs = [] |
|
271 | 271 | copies = {} |
|
272 | 272 | for e in revision: |
|
273 | 273 | m = self.add_file_re.match(e) |
|
274 | 274 | if m: |
|
275 | 275 | files[m.group(1)] = rev |
|
276 | 276 | ignoremove[m.group(1)] = rev |
|
277 | 277 | m = self.patch_re.match(e) |
|
278 | 278 | if m: |
|
279 | 279 | files[m.group(1)] = rev |
|
280 | 280 | # Delete/rename is handled later when the convert engine |
|
281 | 281 | # discovers an IOError exception from getfile, |
|
282 | 282 | # but only if we add the "from" file to the list of changes. |
|
283 | 283 | m = self.delete_re.match(e) |
|
284 | 284 | if m: |
|
285 | 285 | files[m.group(1)] = rev |
|
286 | 286 | m = self.rename_re.match(e) |
|
287 | 287 | if m: |
|
288 | 288 | toname = m.group(2) |
|
289 | 289 | fromname = m.group(1) |
|
290 | 290 | if self.mtnisfile(toname, rev): |
|
291 | 291 | ignoremove[toname] = 1 |
|
292 | 292 | copies[toname] = fromname |
|
293 | 293 | files[toname] = rev |
|
294 | 294 | files[fromname] = rev |
|
295 | 295 | elif self.mtnisdir(toname, rev): |
|
296 | 296 | renameddirs.append((fromname, toname)) |
|
297 | 297 | |
|
298 | 298 | # Directory renames can be handled only once we have recorded |
|
299 | 299 | # all new files |
|
300 | 300 | for fromdir, todir in renameddirs: |
|
301 | 301 | renamed = {} |
|
302 | 302 | for tofile in self.files: |
|
303 | 303 | if tofile in ignoremove: |
|
304 | 304 | continue |
|
305 | 305 | if tofile.startswith(todir + b'/'): |
|
306 | 306 | renamed[tofile] = fromdir + tofile[len(todir) :] |
|
307 | 307 | # Avoid chained moves like: |
|
308 | 308 | # d1(/a) => d3/d1(/a) |
|
309 | 309 | # d2 => d3 |
|
310 | 310 | ignoremove[tofile] = 1 |
|
311 | 311 | for tofile, fromfile in renamed.items(): |
|
312 | 312 | self.ui.debug( |
|
313 | 313 | b"copying file in renamed directory from '%s' to '%s'" |
|
314 | 314 | % (fromfile, tofile), |
|
315 | 315 | b'\n', |
|
316 | 316 | ) |
|
317 | 317 | files[tofile] = rev |
|
318 | 318 | copies[tofile] = fromfile |
|
319 | 319 | for fromfile in renamed.values(): |
|
320 | 320 | files[fromfile] = rev |
|
321 | 321 | |
|
322 | 322 | return (files.items(), copies, set()) |
|
323 | 323 | |
|
324 | 324 | def getfile(self, name, rev): |
|
325 | 325 | if not self.mtnisfile(name, rev): |
|
326 | 326 | return None, None |
|
327 | 327 | try: |
|
328 | 328 | data = self.mtnrun(b"get_file_of", name, r=rev) |
|
329 | 329 | except Exception: |
|
330 | 330 | return None, None |
|
331 | 331 | self.mtnloadmanifest(rev) |
|
332 | 332 | node, attr = self.files.get(name, (None, b"")) |
|
333 | 333 | return data, attr |
|
334 | 334 | |
|
335 | 335 | def getcommit(self, rev): |
|
336 | 336 | extra = {} |
|
337 | 337 | certs = self.mtngetcerts(rev) |
|
338 | 338 | if certs.get(b'suspend') == certs[b"branch"]: |
|
339 | 339 | extra[b'close'] = b'1' |
|
340 | 340 | dateformat = b"%Y-%m-%dT%H:%M:%S" |
|
341 | 341 | return common.commit( |
|
342 | 342 | author=certs[b"author"], |
|
343 | 343 | date=dateutil.datestr(dateutil.strdate(certs[b"date"], dateformat)), |
|
344 | 344 | desc=certs[b"changelog"], |
|
345 | 345 | rev=rev, |
|
346 | 346 | parents=self.mtnrun(b"parents", rev).splitlines(), |
|
347 | 347 | branch=certs[b"branch"], |
|
348 | 348 | extra=extra, |
|
349 | 349 | ) |
|
350 | 350 | |
|
351 | 351 | def gettags(self): |
|
352 | 352 | tags = {} |
|
353 | 353 | for e in self.mtnrun(b"tags").split(b"\n\n"): |
|
354 | 354 | m = self.tag_re.match(e) |
|
355 | 355 | if m: |
|
356 | 356 | tags[m.group(1)] = m.group(2) |
|
357 | 357 | return tags |
|
358 | 358 | |
|
359 | 359 | def getchangedfiles(self, rev, i): |
|
360 | 360 | # This function is only needed to support --filemap |
|
361 | 361 | # ... and we don't support that |
|
362 | 362 | raise NotImplementedError |
|
363 | 363 | |
|
364 | 364 | def before(self): |
|
365 | 365 | # Check if we have a new enough version to use automate stdio |
|
366 | 366 | try: |
|
367 | 367 | versionstr = self.mtnrunsingle(b"interface_version") |
|
368 | 368 | version = float(versionstr) |
|
369 | 369 | except Exception: |
|
370 | 370 | raise error.Abort( |
|
371 | 371 | _(b"unable to determine mtn automate interface version") |
|
372 | 372 | ) |
|
373 | 373 | |
|
374 | 374 | if version >= 12.0: |
|
375 | 375 | self.automatestdio = True |
|
376 | 376 | self.ui.debug( |
|
377 | 377 | b"mtn automate version %f - using automate stdio\n" % version |
|
378 | 378 | ) |
|
379 | 379 | |
|
380 | 380 | # launch the long-running automate stdio process |
|
381 | 381 | self.mtnwritefp, self.mtnreadfp = self._run2( |
|
382 | 382 | b'automate', b'stdio', b'-d', self.path |
|
383 | 383 | ) |
|
384 | 384 | # read the headers |
|
385 | 385 | read = self.mtnreadfp.readline() |
|
386 | 386 | if read != b'format-version: 2\n': |
|
387 | 387 | raise error.Abort( |
|
388 | 388 | _(b'mtn automate stdio header unexpected: %s') % read |
|
389 | 389 | ) |
|
390 | 390 | while read != b'\n': |
|
391 | 391 | read = self.mtnreadfp.readline() |
|
392 | 392 | if not read: |
|
393 | 393 | raise error.Abort( |
|
394 | 394 | _( |
|
395 | 395 | b"failed to reach end of mtn automate " |
|
396 | 396 | b"stdio headers" |
|
397 | 397 | ) |
|
398 | 398 | ) |
|
399 | 399 | else: |
|
400 | 400 | self.ui.debug( |
|
401 | 401 | b"mtn automate version %s - not using automate stdio " |
|
402 | 402 | b"(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version |
|
403 | 403 | ) |
|
404 | 404 | |
|
405 | 405 | def after(self): |
|
406 | 406 | if self.automatestdio: |
|
407 | 407 | self.mtnwritefp.close() |
|
408 | 408 | self.mtnwritefp = None |
|
409 | 409 | self.mtnreadfp.close() |
|
410 | 410 | self.mtnreadfp = None |
@@ -1,1740 +1,1740 b'' | |||
|
1 | 1 | # Subversion 1.4/1.5 Python API backend |
|
2 | 2 | # |
|
3 | 3 | # Copyright(C) 2007 Daniel Holth et al |
|
4 | 4 | |
|
5 | 5 | import codecs |
|
6 | 6 | import locale |
|
7 | 7 | import os |
|
8 | 8 | import pickle |
|
9 | 9 | import re |
|
10 | 10 | import xml.dom.minidom |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | from mercurial.pycompat import open |
|
14 | 14 | from mercurial import ( |
|
15 | 15 | encoding, |
|
16 | 16 | error, |
|
17 | 17 | pycompat, |
|
18 | 18 | util, |
|
19 | 19 | vfs as vfsmod, |
|
20 | 20 | ) |
|
21 | 21 | from mercurial.utils import ( |
|
22 | 22 | dateutil, |
|
23 | 23 | procutil, |
|
24 | 24 | stringutil, |
|
25 | 25 | ) |
|
26 | 26 | |
|
27 | 27 | from . import common |
|
28 | 28 | |
|
29 | 29 | stringio = util.stringio |
|
30 | 30 | propertycache = util.propertycache |
|
31 | 31 | urlerr = util.urlerr |
|
32 | 32 | urlreq = util.urlreq |
|
33 | 33 | |
|
34 | 34 | commandline = common.commandline |
|
35 | 35 | commit = common.commit |
|
36 | 36 | converter_sink = common.converter_sink |
|
37 | 37 | converter_source = common.converter_source |
|
38 | 38 | decodeargs = common.decodeargs |
|
39 | 39 | encodeargs = common.encodeargs |
|
40 | 40 | makedatetimestamp = common.makedatetimestamp |
|
41 | 41 | mapfile = common.mapfile |
|
42 | 42 | MissingTool = common.MissingTool |
|
43 | 43 | NoRepo = common.NoRepo |
|
44 | 44 | |
|
45 | 45 | # Subversion stuff. Works best with very recent Python SVN bindings |
|
46 | 46 | # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing |
|
47 | 47 | # these bindings. |
|
48 | 48 | |
|
49 | 49 | try: |
|
50 | 50 | import svn |
|
51 | 51 | import svn.client |
|
52 | 52 | import svn.core |
|
53 | 53 | import svn.ra |
|
54 | 54 | import svn.delta |
|
55 | 55 | from . import transport |
|
56 | 56 | import warnings |
|
57 | 57 | |
|
58 | 58 | warnings.filterwarnings( |
|
59 | 59 | 'ignore', module='svn.core', category=DeprecationWarning |
|
60 | 60 | ) |
|
61 | 61 | svn.core.SubversionException # trigger import to catch error |
|
62 | 62 | |
|
63 | 63 | except ImportError: |
|
64 | 64 | svn = None |
|
65 | 65 | |
|
66 | 66 | |
|
67 | 67 | # In Subversion, paths and URLs are Unicode (encoded as UTF-8), which |
|
68 | 68 | # Subversion converts from / to native strings when interfacing with the OS. |
|
69 | 69 | # When passing paths and URLs to Subversion, we have to recode them such that |
|
70 | 70 | # it round-trips with what Subversion is doing.
|
71 | 71 | |
|
72 | 72 | fsencoding = None |
|
73 | 73 | |
|
74 | 74 | |
|
75 | 75 | def init_fsencoding(): |
|
76 | 76 | global fsencoding, fsencoding_is_utf8 |
|
77 | 77 | if fsencoding is not None: |
|
78 | 78 | return |
|
79 | 79 | if pycompat.iswindows: |
|
80 | 80 | # On Windows, filenames are Unicode, but we store them using the MBCS |
|
81 | 81 | # encoding. |
|
82 | 82 | fsencoding = 'mbcs' |
|
83 | 83 | else: |
|
84 | 84 | # This is the encoding used to convert UTF-8 back to natively-encoded |
|
85 | 85 | # strings in Subversion 1.14.0 or earlier with APR 1.7.0 or earlier. |
|
86 | 86 | with util.with_lc_ctype(): |
|
87 | 87 | fsencoding = locale.nl_langinfo(locale.CODESET) or 'ISO-8859-1' |
|
88 | 88 | fsencoding = codecs.lookup(fsencoding).name |
|
89 | 89 | fsencoding_is_utf8 = fsencoding == codecs.lookup('utf-8').name |
|
90 | 90 | |
|
91 | 91 | |
|
92 | 92 | def fs2svn(s): |
|
93 | 93 | if fsencoding_is_utf8: |
|
94 | 94 | return s |
|
95 | 95 | else: |
|
96 | 96 | return s.decode(fsencoding).encode('utf-8') |
|
97 | 97 | |
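# Annotation (not part of the changeset): a minimal sketch of fs2svn()
# on a non-UTF-8 locale, assuming fsencoding == 'iso8859-1'; the locale
# bytes are re-encoded as UTF-8 before reaching the Subversion bindings.
#
#   >>> fs2svn(b'caf\xe9')   # b'caf\xe9' is 'café' in Latin-1
#   b'caf\xc3\xa9'           # the same string encoded as UTF-8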
|
98 | 98 | |
|
99 | 99 | def formatsvndate(date): |
|
100 | 100 | return dateutil.datestr(date, b'%Y-%m-%dT%H:%M:%S.000000Z') |
|
101 | 101 | |
|
102 | 102 | |
|
103 | 103 | def parsesvndate(s): |
|
104 | 104 | # Example SVN datetime. Includes microseconds. |
|
105 | 105 | # ISO-8601 conformant |
|
106 | 106 | # '2007-01-04T17:35:00.902377Z' |
|
107 | 107 | return dateutil.parsedate(s[:19] + b' UTC', [b'%Y-%m-%dT%H:%M:%S']) |
|
108 | 108 | |
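# Annotation (not part of the changeset): a round-trip sketch of the two
# date helpers above, with illustrative values. Parsing keeps only
# second precision and pins the zone to UTC, so microseconds do not
# survive the round trip.
#
#   >>> parsesvndate(b'2007-01-04T17:35:00.902377Z')
#   (1167932100, 0)
#   >>> formatsvndate((1167932100, 0))
#   b'2007-01-04T17:35:00.000000Z'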
|
109 | 109 | |
|
110 | 110 | class SvnPathNotFound(Exception): |
|
111 | 111 | pass |
|
112 | 112 | |
|
113 | 113 | |
|
114 | 114 | def revsplit(rev): |
|
115 | 115 | """Parse a revision string and return (uuid, path, revnum). |
|
116 | 116 | >>> revsplit(b'svn:a2147622-4a9f-4db4-a8d3-13562ff547b2' |
|
117 | 117 | ... b'/proj%20B/mytrunk/mytrunk@1') |
|
118 | 118 | ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1) |
|
119 | 119 | >>> revsplit(b'svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1') |
|
120 | 120 | ('', '', 1) |
|
121 | 121 | >>> revsplit(b'@7') |
|
122 | 122 | ('', '', 7) |
|
123 | 123 | >>> revsplit(b'7') |
|
124 | 124 | ('', '', 0) |
|
125 | 125 | >>> revsplit(b'bad') |
|
126 | 126 | ('', '', 0) |
|
127 | 127 | """ |
|
128 | 128 | parts = rev.rsplit(b'@', 1) |
|
129 | 129 | revnum = 0 |
|
130 | 130 | if len(parts) > 1: |
|
131 | 131 | revnum = int(parts[1]) |
|
132 | 132 | parts = parts[0].split(b'/', 1) |
|
133 | 133 | uuid = b'' |
|
134 | 134 | mod = b'' |
|
135 | 135 | if len(parts) > 1 and parts[0].startswith(b'svn:'): |
|
136 | 136 | uuid = parts[0][4:] |
|
137 | 137 | mod = b'/' + parts[1] |
|
138 | 138 | return uuid, mod, revnum |
|
139 | 139 | |
|
140 | 140 | |
|
141 | 141 | def quote(s): |
|
142 | 142 | # As of svn 1.7, many svn calls expect "canonical" paths. In |
|
143 | 143 | # theory, we should call svn.core.*canonicalize() on all paths |
|
144 | 144 | # before passing them to the API. Instead, we assume the base url |
|
145 | 145 | # is canonical and copy the behaviour of svn URL encoding function |
|
146 | 146 | # so we can extend it safely with new components. The "safe" |
|
147 | 147 | # characters were taken from the "svn_uri__char_validity" table in |
|
148 | 148 | # libsvn_subr/path.c. |
|
149 | 149 | return urlreq.quote(s, b"!$&'()*+,-./:=@_~") |
|
150 | 150 | |
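# Annotation (not part of the changeset): illustrative behaviour of
# quote() under the safe set above (made-up input) -- spaces are
# escaped while path separators and '@' pass through:
#
#   >>> quote(b'/proj B/trunk@copy')
#   b'/proj%20B/trunk@copy'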
|
151 | 151 | |
|
152 | 152 | def geturl(path): |
|
153 | 153 | """Convert path or URL to a SVN URL, encoded in UTF-8. |
|
154 | 154 | |
|
155 | 155 | This can raise UnicodeDecodeError if the path or URL can't be converted to |
|
156 | 156 | unicode using `fsencoding`. |
|
157 | 157 | """ |
|
158 | 158 | try: |
|
159 | 159 | return svn.client.url_from_path( |
|
160 | 160 | svn.core.svn_path_canonicalize(fs2svn(path)) |
|
161 | 161 | ) |
|
162 | 162 | except svn.core.SubversionException: |
|
163 | 163 | # svn.client.url_from_path() fails with local repositories |
|
164 | 164 | pass |
|
165 | 165 | if os.path.isdir(path): |
|
166 | 166 | path = os.path.normpath(util.abspath(path)) |
|
167 | 167 | if pycompat.iswindows: |
|
168 | 168 | path = b'/' + util.normpath(path) |
|
169 | 169 | # Module URL is later compared with the repository URL returned |
|
170 | 170 | # by svn API, which is UTF-8. |
|
171 | 171 | path = fs2svn(path) |
|
172 | 172 | path = b'file://%s' % quote(path) |
|
173 | 173 | return svn.core.svn_path_canonicalize(path) |
|
174 | 174 | |
|
175 | 175 | |
|
176 | 176 | def optrev(number): |
|
177 | 177 | optrev = svn.core.svn_opt_revision_t() |
|
178 | 178 | optrev.kind = svn.core.svn_opt_revision_number |
|
179 | 179 | optrev.value.number = number |
|
180 | 180 | return optrev |
|
181 | 181 | |
|
182 | 182 | |
|
183 | 183 | class changedpath(object): |
|
184 | 184 | def __init__(self, p): |
|
185 | 185 | self.copyfrom_path = p.copyfrom_path |
|
186 | 186 | self.copyfrom_rev = p.copyfrom_rev |
|
187 | 187 | self.action = p.action |
|
188 | 188 | |
|
189 | 189 | |
|
190 | 190 | def get_log_child( |
|
191 | 191 | fp, |
|
192 | 192 | url, |
|
193 | 193 | paths, |
|
194 | 194 | start, |
|
195 | 195 | end, |
|
196 | 196 | limit=0, |
|
197 | 197 | discover_changed_paths=True, |
|
198 | 198 | strict_node_history=False, |
|
199 | 199 | ): |
|
200 | 200 | protocol = -1 |
|
201 | 201 | |
|
202 | 202 | def receiver(orig_paths, revnum, author, date, message, pool): |
|
203 | 203 | paths = {} |
|
204 | 204 | if orig_paths is not None: |
|
205 | | for k, v in
|
| 205 | for k, v in orig_paths.items():
|
206 | 206 | paths[k] = changedpath(v) |
|
207 | 207 | pickle.dump((paths, revnum, author, date, message), fp, protocol) |
|
208 | 208 | |
|
209 | 209 | try: |
|
210 | 210 | # Use an ra of our own so that our parent can consume |
|
211 | 211 | # our results without confusing the server. |
|
212 | 212 | t = transport.SvnRaTransport(url=url) |
|
213 | 213 | svn.ra.get_log( |
|
214 | 214 | t.ra, |
|
215 | 215 | paths, |
|
216 | 216 | start, |
|
217 | 217 | end, |
|
218 | 218 | limit, |
|
219 | 219 | discover_changed_paths, |
|
220 | 220 | strict_node_history, |
|
221 | 221 | receiver, |
|
222 | 222 | ) |
|
223 | 223 | except IOError: |
|
224 | 224 | # Caller may interrupt the iteration |
|
225 | 225 | pickle.dump(None, fp, protocol) |
|
226 | 226 | except Exception as inst: |
|
227 | 227 | pickle.dump(stringutil.forcebytestr(inst), fp, protocol) |
|
228 | 228 | else: |
|
229 | 229 | pickle.dump(None, fp, protocol) |
|
230 | 230 | fp.flush() |
|
231 | 231 | # With a large history, the cleanup process goes crazy and suddenly
|
232 | 232 | # consumes a *huge* amount of memory. Since the output file has been
|
233 | 233 | # closed, there is no need for a clean termination.
|
234 | 234 | os._exit(0) |
|
235 | 235 | |
|
236 | 236 | |
|
237 | 237 | def debugsvnlog(ui, **opts): |
|
238 | 238 | """Fetch SVN log in a subprocess and channel them back to parent to |
|
239 | 239 | avoid memory collection issues. |
|
240 | 240 | """ |
|
241 | 241 | with util.with_lc_ctype(): |
|
242 | 242 | if svn is None: |
|
243 | 243 | raise error.Abort( |
|
244 | 244 | _(b'debugsvnlog could not load Subversion python bindings') |
|
245 | 245 | ) |
|
246 | 246 | |
|
247 | 247 | args = decodeargs(ui.fin.read()) |
|
248 | 248 | get_log_child(ui.fout, *args) |
|
249 | 249 | |
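# Annotation (not part of the changeset): a sketch of the parent/child
# wire format used here. The parent pickles the _getlog() arguments to
# the child's stdin, then unpickles entries from its stdout:
#
#   entry = pickle.load(child_stdout)   # hypothetical reader loop
#   # entry is (paths, revnum, author, date, message); None when the
#   # log is exhausted or interrupted; or a bytes error message if the
#   # child hit an exception (see logstream below).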
|
250 | 250 | |
|
251 | 251 | class logstream(object): |
|
252 | 252 | """Interruptible revision log iterator.""" |
|
253 | 253 | |
|
254 | 254 | def __init__(self, stdout): |
|
255 | 255 | self._stdout = stdout |
|
256 | 256 | |
|
257 | 257 | def __iter__(self): |
|
258 | 258 | while True: |
|
259 | 259 | try: |
|
260 | 260 | entry = pickle.load(self._stdout) |
|
261 | 261 | except EOFError: |
|
262 | 262 | raise error.Abort( |
|
263 | 263 | _( |
|
264 | 264 | b'Mercurial failed to run itself, check' |
|
265 | 265 | b' hg executable is in PATH' |
|
266 | 266 | ) |
|
267 | 267 | ) |
|
268 | 268 | try: |
|
269 | 269 | orig_paths, revnum, author, date, message = entry |
|
270 | 270 | except (TypeError, ValueError): |
|
271 | 271 | if entry is None: |
|
272 | 272 | break |
|
273 | 273 | raise error.Abort(_(b"log stream exception '%s'") % entry) |
|
274 | 274 | yield entry |
|
275 | 275 | |
|
276 | 276 | def close(self): |
|
277 | 277 | if self._stdout: |
|
278 | 278 | self._stdout.close() |
|
279 | 279 | self._stdout = None |
|
280 | 280 | |
|
281 | 281 | |
|
282 | 282 | class directlogstream(list): |
|
283 | 283 | """Direct revision log iterator. |
|
284 | 284 | This can be used for debugging and development but it will probably leak |
|
285 | 285 | memory and is not suitable for real conversions.""" |
|
286 | 286 | |
|
287 | 287 | def __init__( |
|
288 | 288 | self, |
|
289 | 289 | url, |
|
290 | 290 | paths, |
|
291 | 291 | start, |
|
292 | 292 | end, |
|
293 | 293 | limit=0, |
|
294 | 294 | discover_changed_paths=True, |
|
295 | 295 | strict_node_history=False, |
|
296 | 296 | ): |
|
297 | 297 | def receiver(orig_paths, revnum, author, date, message, pool): |
|
298 | 298 | paths = {} |
|
299 | 299 | if orig_paths is not None: |
|
300 | | for k, v in
|
| 300 | for k, v in orig_paths.items():
|
301 | 301 | paths[k] = changedpath(v) |
|
302 | 302 | self.append((paths, revnum, author, date, message)) |
|
303 | 303 | |
|
304 | 304 | # Use an ra of our own so that our parent can consume |
|
305 | 305 | # our results without confusing the server. |
|
306 | 306 | t = transport.SvnRaTransport(url=url) |
|
307 | 307 | svn.ra.get_log( |
|
308 | 308 | t.ra, |
|
309 | 309 | paths, |
|
310 | 310 | start, |
|
311 | 311 | end, |
|
312 | 312 | limit, |
|
313 | 313 | discover_changed_paths, |
|
314 | 314 | strict_node_history, |
|
315 | 315 | receiver, |
|
316 | 316 | ) |
|
317 | 317 | |
|
318 | 318 | def close(self): |
|
319 | 319 | pass |
|
320 | 320 | |
|
321 | 321 | |
|
322 | 322 | # Check to see if the given path is a local Subversion repo. Verify this by |
|
323 | 323 | # looking for several svn-specific files and directories in the given |
|
324 | 324 | # directory. |
|
325 | 325 | def filecheck(ui, path, proto): |
|
326 | 326 | for x in (b'locks', b'hooks', b'format', b'db'): |
|
327 | 327 | if not os.path.exists(os.path.join(path, x)): |
|
328 | 328 | return False |
|
329 | 329 | return True |
|
330 | 330 | |
|
331 | 331 | |
|
332 | 332 | # Check to see if a given path is the root of an svn repo over http. We verify |
|
333 | 333 | # this by requesting a version-controlled URL we know can't exist and looking |
|
334 | 334 | # for the svn-specific "not found" XML. |
|
335 | 335 | def httpcheck(ui, path, proto): |
|
336 | 336 | try: |
|
337 | 337 | opener = urlreq.buildopener() |
|
338 | 338 | rsp = opener.open( |
|
339 | 339 | pycompat.strurl(b'%s://%s/!svn/ver/0/.svn' % (proto, path)), b'rb' |
|
340 | 340 | ) |
|
341 | 341 | data = rsp.read() |
|
342 | 342 | except urlerr.httperror as inst: |
|
343 | 343 | if inst.code != 404: |
|
344 | 344 | # Except for 404 we cannot know for sure this is not an svn repo |
|
345 | 345 | ui.warn( |
|
346 | 346 | _( |
|
347 | 347 | b'svn: cannot probe remote repository, assume it could ' |
|
348 | 348 | b'be a subversion repository. Use --source-type if you ' |
|
349 | 349 | b'know better.\n' |
|
350 | 350 | ) |
|
351 | 351 | ) |
|
352 | 352 | return True |
|
353 | 353 | data = inst.fp.read() |
|
354 | 354 | except Exception: |
|
355 | 355 | # Could be urlerr.urlerror if the URL is invalid or anything else. |
|
356 | 356 | return False |
|
357 | 357 | return b'<m:human-readable errcode="160013">' in data |
|
358 | 358 | |
|
359 | 359 | |
|
360 | 360 | protomap = { |
|
361 | 361 | b'http': httpcheck, |
|
362 | 362 | b'https': httpcheck, |
|
363 | 363 | b'file': filecheck, |
|
364 | 364 | } |
|
365 | 365 | |
|
366 | 366 | |
|
367 | 367 | class NonUtf8PercentEncodedBytes(Exception): |
|
368 | 368 | pass |
|
369 | 369 | |
|
370 | 370 | |
|
371 | 371 | # Subversion paths are Unicode. Since the percent-decoding is done on |
|
372 | 372 | # UTF-8-encoded strings, percent-encoded bytes are interpreted as UTF-8. |
|
373 | 373 | def url2pathname_like_subversion(unicodepath): |
|
374 | 374 | if pycompat.ispy3: |
|
375 | 375 | # On Python 3, we have to pass unicode to urlreq.url2pathname(). |
|
376 | 376 | # Percent-decoded bytes get decoded using UTF-8 and the 'replace' error |
|
377 | 377 | # handler. |
|
378 | 378 | unicodepath = urlreq.url2pathname(unicodepath) |
|
379 | 379 | if u'\N{REPLACEMENT CHARACTER}' in unicodepath: |
|
380 | 380 | raise NonUtf8PercentEncodedBytes |
|
381 | 381 | else: |
|
382 | 382 | return unicodepath |
|
383 | 383 | else: |
|
384 | 384 | # If we passed unicode on Python 2, it would be converted using the |
|
385 | 385 | # latin-1 encoding. Therefore, we pass UTF-8-encoded bytes. |
|
386 | 386 | unicodepath = urlreq.url2pathname(unicodepath.encode('utf-8')) |
|
387 | 387 | try: |
|
388 | 388 | return unicodepath.decode('utf-8') |
|
389 | 389 | except UnicodeDecodeError: |
|
390 | 390 | raise NonUtf8PercentEncodedBytes |
|
391 | 391 | |
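# Annotation (not part of the changeset): a hedged example of the
# Python 3 branch above on POSIX, with illustrative paths -- UTF-8
# escapes decode cleanly, anything else is rejected rather than
# silently replaced:
#
#   >>> url2pathname_like_subversion(u'/tmp/caf%C3%A9')
#   '/tmp/café'
#   >>> url2pathname_like_subversion(u'/tmp/caf%E9')
#   Traceback (most recent call last):
#       ...
#   NonUtf8PercentEncodedBytes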
|
392 | 392 | |
|
393 | 393 | def issvnurl(ui, url): |
|
394 | 394 | try: |
|
395 | 395 | proto, path = url.split(b'://', 1) |
|
396 | 396 | if proto == b'file': |
|
397 | 397 | if ( |
|
398 | 398 | pycompat.iswindows |
|
399 | 399 | and path[:1] == b'/' |
|
400 | 400 | and path[1:2].isalpha() |
|
401 | 401 | and path[2:6].lower() == b'%3a/' |
|
402 | 402 | ): |
|
403 | 403 | path = path[:2] + b':/' + path[6:] |
|
404 | 404 | try: |
|
405 | 405 | unicodepath = path.decode(fsencoding) |
|
406 | 406 | except UnicodeDecodeError: |
|
407 | 407 | ui.warn( |
|
408 | 408 | _( |
|
409 | 409 | b'Subversion requires that file URLs can be converted ' |
|
410 | 410 | b'to Unicode using the current locale encoding (%s)\n' |
|
411 | 411 | ) |
|
412 | 412 | % pycompat.sysbytes(fsencoding) |
|
413 | 413 | ) |
|
414 | 414 | return False |
|
415 | 415 | try: |
|
416 | 416 | unicodepath = url2pathname_like_subversion(unicodepath) |
|
417 | 417 | except NonUtf8PercentEncodedBytes: |
|
418 | 418 | ui.warn( |
|
419 | 419 | _( |
|
420 | 420 | b'Subversion does not support non-UTF-8 ' |
|
421 | 421 | b'percent-encoded bytes in file URLs\n' |
|
422 | 422 | ) |
|
423 | 423 | ) |
|
424 | 424 | return False |
|
425 | 425 | # Below, we approximate how Subversion checks the path. On Unix, we |
|
426 | 426 | # should therefore convert the path to bytes using `fsencoding` |
|
427 | 427 | # (like Subversion does). On Windows, the right thing would |
|
428 | 428 | # actually be to leave the path as unicode. For now, we restrict |
|
429 | 429 | # the path to MBCS. |
|
430 | 430 | path = unicodepath.encode(fsencoding) |
|
431 | 431 | except ValueError: |
|
432 | 432 | proto = b'file' |
|
433 | 433 | path = util.abspath(url) |
|
434 | 434 | try: |
|
435 | 435 | path.decode(fsencoding) |
|
436 | 436 | except UnicodeDecodeError: |
|
437 | 437 | ui.warn( |
|
438 | 438 | _( |
|
439 | 439 | b'Subversion requires that paths can be converted to ' |
|
440 | 440 | b'Unicode using the current locale encoding (%s)\n' |
|
441 | 441 | ) |
|
442 | 442 | % pycompat.sysbytes(fsencoding) |
|
443 | 443 | ) |
|
444 | 444 | return False |
|
445 | 445 | if proto == b'file': |
|
446 | 446 | path = util.pconvert(path) |
|
447 | 447 | elif proto in (b'http', b'https'):
|
448 | 448 | if not encoding.isasciistr(path): |
|
449 | 449 | ui.warn( |
|
450 | 450 | _( |
|
451 | 451 | b"Subversion sources don't support non-ASCII characters in " |
|
452 | 452 | b"HTTP(S) URLs. Please percent-encode them.\n" |
|
453 | 453 | ) |
|
454 | 454 | ) |
|
455 | 455 | return False |
|
456 | 456 | check = protomap.get(proto, lambda *args: False) |
|
457 | 457 | while b'/' in path: |
|
458 | 458 | if check(ui, path, proto): |
|
459 | 459 | return True |
|
460 | 460 | path = path.rsplit(b'/', 1)[0] |
|
461 | 461 | return False |
|
462 | 462 | |
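# Annotation (not part of the changeset): how the probe loop above
# walks up the URL, traced on a made-up input:
#
#   issvnurl(ui, b'http://host/repo/trunk')
#   -> httpcheck(ui, b'host/repo/trunk', b'http')  # no
#   -> httpcheck(ui, b'host/repo', b'http')        # yes -> True
#   (bare b'host' is never probed: no '/' left, so the loop ends)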
|
463 | 463 | |
|
464 | 464 | # SVN conversion code stolen from bzr-svn and tailor |
|
465 | 465 | # |
|
466 | 466 | # Subversion looks like a versioned filesystem, branches structures |
|
467 | 467 | # are defined by conventions and not enforced by the tool. First, |
|
468 | 468 | # we define the potential branches (modules) as "trunk" and "branches" |
|
469 | 469 | # children directories. Revisions are then identified by their |
|
470 | 470 | # module and revision number (and a repository identifier). |
|
471 | 471 | # |
|
472 | 472 | # The revision graph is really a tree (or a forest). By default, a |
|
473 | 473 | # revision parent is the previous revision in the same module. If the |
|
474 | 474 | # module directory is copied/moved from another module then the |
|
475 | 475 | # revision is the module root and its parent the source revision in |
|
476 | 476 | # the parent module. A revision has at most one parent. |
|
477 | 477 | # |
|
478 | 478 | class svn_source(converter_source): |
|
479 | 479 | def __init__(self, ui, repotype, url, revs=None): |
|
480 | 480 | super(svn_source, self).__init__(ui, repotype, url, revs=revs) |
|
481 | 481 | |
|
482 | 482 | init_fsencoding() |
|
483 | 483 | if not ( |
|
484 | 484 | url.startswith(b'svn://') |
|
485 | 485 | or url.startswith(b'svn+ssh://') |
|
486 | 486 | or ( |
|
487 | 487 | os.path.exists(url) |
|
488 | 488 | and os.path.exists(os.path.join(url, b'.svn')) |
|
489 | 489 | ) |
|
490 | 490 | or issvnurl(ui, url) |
|
491 | 491 | ): |
|
492 | 492 | raise NoRepo( |
|
493 | 493 | _(b"%s does not look like a Subversion repository") % url |
|
494 | 494 | ) |
|
495 | 495 | if svn is None: |
|
496 | 496 | raise MissingTool(_(b'could not load Subversion python bindings')) |
|
497 | 497 | |
|
498 | 498 | try: |
|
499 | 499 | version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR |
|
500 | 500 | if version < (1, 4): |
|
501 | 501 | raise MissingTool( |
|
502 | 502 | _( |
|
503 | 503 | b'Subversion python bindings %d.%d found, ' |
|
504 | 504 | b'1.4 or later required' |
|
505 | 505 | ) |
|
506 | 506 | % version |
|
507 | 507 | ) |
|
508 | 508 | except AttributeError: |
|
509 | 509 | raise MissingTool( |
|
510 | 510 | _( |
|
511 | 511 | b'Subversion python bindings are too old, 1.4 ' |
|
512 | 512 | b'or later required' |
|
513 | 513 | ) |
|
514 | 514 | ) |
|
515 | 515 | |
|
516 | 516 | self.lastrevs = {} |
|
517 | 517 | |
|
518 | 518 | latest = None |
|
519 | 519 | try: |
|
520 | 520 | # Support file://path@rev syntax. Useful e.g. to convert |
|
521 | 521 | # deleted branches. |
|
522 | 522 | at = url.rfind(b'@') |
|
523 | 523 | if at >= 0: |
|
524 | 524 | latest = int(url[at + 1 :]) |
|
525 | 525 | url = url[:at] |
|
526 | 526 | except ValueError: |
|
527 | 527 | pass |
|
528 | 528 | self.url = geturl(url) |
|
529 | 529 | self.encoding = b'UTF-8' # Subversion is always nominally UTF-8
|
530 | 530 | try: |
|
531 | 531 | with util.with_lc_ctype(): |
|
532 | 532 | self.transport = transport.SvnRaTransport(url=self.url) |
|
533 | 533 | self.ra = self.transport.ra |
|
534 | 534 | self.ctx = self.transport.client |
|
535 | 535 | self.baseurl = svn.ra.get_repos_root(self.ra) |
|
536 | 536 | # Module is either empty or a repository path starting with |
|
537 | 537 | # a slash and not ending with a slash. |
|
538 | 538 | self.module = urlreq.unquote(self.url[len(self.baseurl) :]) |
|
539 | 539 | self.prevmodule = None |
|
540 | 540 | self.rootmodule = self.module |
|
541 | 541 | self.commits = {} |
|
542 | 542 | self.paths = {} |
|
543 | 543 | self.uuid = svn.ra.get_uuid(self.ra) |
|
544 | 544 | except svn.core.SubversionException: |
|
545 | 545 | ui.traceback() |
|
546 | 546 | svnversion = b'%d.%d.%d' % ( |
|
547 | 547 | svn.core.SVN_VER_MAJOR, |
|
548 | 548 | svn.core.SVN_VER_MINOR, |
|
549 | 549 | svn.core.SVN_VER_MICRO, |
|
550 | 550 | ) |
|
551 | 551 | raise NoRepo( |
|
552 | 552 | _( |
|
553 | 553 | b"%s does not look like a Subversion repository " |
|
554 | 554 | b"to libsvn version %s" |
|
555 | 555 | ) |
|
556 | 556 | % (self.url, svnversion) |
|
557 | 557 | ) |
|
558 | 558 | |
|
559 | 559 | if revs: |
|
560 | 560 | if len(revs) > 1: |
|
561 | 561 | raise error.Abort( |
|
562 | 562 | _( |
|
563 | 563 | b'subversion source does not support ' |
|
564 | 564 | b'specifying multiple revisions' |
|
565 | 565 | ) |
|
566 | 566 | ) |
|
567 | 567 | try: |
|
568 | 568 | latest = int(revs[0]) |
|
569 | 569 | except ValueError: |
|
570 | 570 | raise error.Abort( |
|
571 | 571 | _(b'svn: revision %s is not an integer') % revs[0] |
|
572 | 572 | ) |
|
573 | 573 | |
|
574 | 574 | trunkcfg = self.ui.config(b'convert', b'svn.trunk') |
|
575 | 575 | if trunkcfg is None: |
|
576 | 576 | trunkcfg = b'trunk' |
|
577 | 577 | self.trunkname = trunkcfg.strip(b'/') |
|
578 | 578 | self.startrev = self.ui.config(b'convert', b'svn.startrev') |
|
579 | 579 | try: |
|
580 | 580 | self.startrev = int(self.startrev) |
|
581 | 581 | if self.startrev < 0: |
|
582 | 582 | self.startrev = 0 |
|
583 | 583 | except ValueError: |
|
584 | 584 | raise error.Abort( |
|
585 | 585 | _(b'svn: start revision %s is not an integer') % self.startrev |
|
586 | 586 | ) |
|
587 | 587 | |
|
588 | 588 | try: |
|
589 | 589 | with util.with_lc_ctype(): |
|
590 | 590 | self.head = self.latest(self.module, latest) |
|
591 | 591 | except SvnPathNotFound: |
|
592 | 592 | self.head = None |
|
593 | 593 | if not self.head: |
|
594 | 594 | raise error.Abort( |
|
595 | 595 | _(b'no revision found in module %s') % self.module |
|
596 | 596 | ) |
|
597 | 597 | self.last_changed = self.revnum(self.head) |
|
598 | 598 | |
|
599 | 599 | self._changescache = (None, None) |
|
600 | 600 | |
|
601 | 601 | if os.path.exists(os.path.join(url, b'.svn/entries')): |
|
602 | 602 | self.wc = url |
|
603 | 603 | else: |
|
604 | 604 | self.wc = None |
|
605 | 605 | self.convertfp = None |
|
606 | 606 | |
|
607 | 607 | def before(self): |
|
608 | 608 | self.with_lc_ctype = util.with_lc_ctype() |
|
609 | 609 | self.with_lc_ctype.__enter__() |
|
610 | 610 | |
|
611 | 611 | def after(self): |
|
612 | 612 | self.with_lc_ctype.__exit__(None, None, None) |
|
613 | 613 | |
|
614 | 614 | def setrevmap(self, revmap): |
|
615 | 615 | lastrevs = {} |
|
616 | 616 | for revid in revmap: |
|
617 | 617 | uuid, module, revnum = revsplit(revid) |
|
618 | 618 | lastrevnum = lastrevs.setdefault(module, revnum) |
|
619 | 619 | if revnum > lastrevnum: |
|
620 | 620 | lastrevs[module] = revnum |
|
621 | 621 | self.lastrevs = lastrevs |
|
622 | 622 | |
|
623 | 623 | def exists(self, path, optrev): |
|
624 | 624 | try: |
|
625 | 625 | svn.client.ls( |
|
626 | 626 | self.url.rstrip(b'/') + b'/' + quote(path), |
|
627 | 627 | optrev, |
|
628 | 628 | False, |
|
629 | 629 | self.ctx, |
|
630 | 630 | ) |
|
631 | 631 | return True |
|
632 | 632 | except svn.core.SubversionException: |
|
633 | 633 | return False |
|
634 | 634 | |
|
635 | 635 | def getheads(self): |
|
636 | 636 | def isdir(path, revnum): |
|
637 | 637 | kind = self._checkpath(path, revnum) |
|
638 | 638 | return kind == svn.core.svn_node_dir |
|
639 | 639 | |
|
640 | 640 | def getcfgpath(name, rev): |
|
641 | 641 | cfgpath = self.ui.config(b'convert', b'svn.' + name) |
|
642 | 642 | if cfgpath is not None and cfgpath.strip() == b'': |
|
643 | 643 | return None |
|
644 | 644 | path = (cfgpath or name).strip(b'/') |
|
645 | 645 | if not self.exists(path, rev): |
|
646 | 646 | if self.module.endswith(path) and name == b'trunk': |
|
647 | 647 | # we are converting from inside this directory |
|
648 | 648 | return None |
|
649 | 649 | if cfgpath: |
|
650 | 650 | raise error.Abort( |
|
651 | 651 | _(b'expected %s to be at %r, but not found') |
|
652 | 652 | % (name, path) |
|
653 | 653 | ) |
|
654 | 654 | return None |
|
655 | 655 | self.ui.note( |
|
656 | 656 | _(b'found %s at %r\n') % (name, pycompat.bytestr(path)) |
|
657 | 657 | ) |
|
658 | 658 | return path |
|
659 | 659 | |
|
660 | 660 | rev = optrev(self.last_changed) |
|
661 | 661 | oldmodule = b'' |
|
662 | 662 | trunk = getcfgpath(b'trunk', rev) |
|
663 | 663 | self.tags = getcfgpath(b'tags', rev) |
|
664 | 664 | branches = getcfgpath(b'branches', rev) |
|
665 | 665 | |
|
666 | 666 | # If the project has a trunk or branches, we will extract heads |
|
667 | 667 | # from them. We keep the project root otherwise. |
|
668 | 668 | if trunk: |
|
669 | 669 | oldmodule = self.module or b'' |
|
670 | 670 | self.module += b'/' + trunk |
|
671 | 671 | self.head = self.latest(self.module, self.last_changed) |
|
672 | 672 | if not self.head: |
|
673 | 673 | raise error.Abort( |
|
674 | 674 | _(b'no revision found in module %s') % self.module |
|
675 | 675 | ) |
|
676 | 676 | |
|
677 | 677 | # First head in the list is the module's head |
|
678 | 678 | self.heads = [self.head] |
|
679 | 679 | if self.tags is not None: |
|
680 | 680 | self.tags = b'%s/%s' % (oldmodule, (self.tags or b'tags')) |
|
681 | 681 | |
|
682 | 682 | # Check if branches bring a few more heads to the list |
|
683 | 683 | if branches: |
|
684 | 684 | rpath = self.url.strip(b'/') |
|
685 | 685 | branchnames = svn.client.ls( |
|
686 | 686 | rpath + b'/' + quote(branches), rev, False, self.ctx |
|
687 | 687 | ) |
|
688 | 688 | for branch in sorted(branchnames): |
|
689 | 689 | module = b'%s/%s/%s' % (oldmodule, branches, branch) |
|
690 | 690 | if not isdir(module, self.last_changed): |
|
691 | 691 | continue |
|
692 | 692 | brevid = self.latest(module, self.last_changed) |
|
693 | 693 | if not brevid: |
|
694 | 694 | self.ui.note(_(b'ignoring empty branch %s\n') % branch) |
|
695 | 695 | continue |
|
696 | 696 | self.ui.note( |
|
697 | 697 | _(b'found branch %s at %d\n') |
|
698 | 698 | % (branch, self.revnum(brevid)) |
|
699 | 699 | ) |
|
700 | 700 | self.heads.append(brevid) |
|
701 | 701 | |
|
702 | 702 | if self.startrev and self.heads: |
|
703 | 703 | if len(self.heads) > 1: |
|
704 | 704 | raise error.Abort( |
|
705 | 705 | _( |
|
706 | 706 | b'svn: start revision is not supported ' |
|
707 | 707 | b'with more than one branch' |
|
708 | 708 | ) |
|
709 | 709 | ) |
|
710 | 710 | revnum = self.revnum(self.heads[0]) |
|
711 | 711 | if revnum < self.startrev: |
|
712 | 712 | raise error.Abort( |
|
713 | 713 | _(b'svn: no revision found after start revision %d') |
|
714 | 714 | % self.startrev |
|
715 | 715 | ) |
|
716 | 716 | |
|
717 | 717 | return self.heads |
|
718 | 718 | |
|
719 | 719 | def _getchanges(self, rev, full): |
|
720 | 720 | (paths, parents) = self.paths[rev] |
|
721 | 721 | copies = {} |
|
722 | 722 | if parents: |
|
723 | 723 | files, self.removed, copies = self.expandpaths(rev, paths, parents) |
|
724 | 724 | if full or not parents: |
|
725 | 725 | # Perform a full checkout on roots |
|
726 | 726 | uuid, module, revnum = revsplit(rev) |
|
727 | 727 | entries = svn.client.ls( |
|
728 | 728 | self.baseurl + quote(module), optrev(revnum), True, self.ctx |
|
729 | 729 | ) |
|
730 | 730 | files = [ |
|
731 | 731 | n |
|
732 | | for n, e in
|
| 732 | for n, e in entries.items()
|
733 | 733 | if e.kind == svn.core.svn_node_file |
|
734 | 734 | ] |
|
735 | 735 | self.removed = set() |
|
736 | 736 | |
|
737 | 737 | files.sort() |
|
738 | 738 | files = pycompat.ziplist(files, [rev] * len(files)) |
|
739 | 739 | return (files, copies) |
|
740 | 740 | |
|
741 | 741 | def getchanges(self, rev, full): |
|
742 | 742 | # reuse cache from getchangedfiles |
|
743 | 743 | if self._changescache[0] == rev and not full: |
|
744 | 744 | (files, copies) = self._changescache[1] |
|
745 | 745 | else: |
|
746 | 746 | (files, copies) = self._getchanges(rev, full) |
|
747 | 747 | # caller caches the result, so free it here to release memory |
|
748 | 748 | del self.paths[rev] |
|
749 | 749 | return (files, copies, set()) |
|
750 | 750 | |
|
751 | 751 | def getchangedfiles(self, rev, i): |
|
752 | 752 | # called from filemap - cache computed values for reuse in getchanges |
|
753 | 753 | (files, copies) = self._getchanges(rev, False) |
|
754 | 754 | self._changescache = (rev, (files, copies)) |
|
755 | 755 | return [f[0] for f in files] |
|
756 | 756 | |
|
757 | 757 | def getcommit(self, rev): |
|
758 | 758 | if rev not in self.commits: |
|
759 | 759 | uuid, module, revnum = revsplit(rev) |
|
760 | 760 | self.module = module |
|
761 | 761 | self.reparent(module) |
|
762 | 762 | # We assume that: |
|
763 | 763 | # - requests for revisions after "stop" come from the |
|
764 | 764 | # revision graph backward traversal. Cache all of them |
|
765 | 765 | # down to stop, they will be used eventually. |
|
766 | 766 | # - requests for revisions before "stop" come to get |
|
767 | 767 | # isolated branches parents. Just fetch what is needed. |
|
768 | 768 | stop = self.lastrevs.get(module, 0) |
|
769 | 769 | if revnum < stop: |
|
770 | 770 | stop = revnum + 1 |
|
771 | 771 | self._fetch_revisions(revnum, stop) |
|
772 | 772 | if rev not in self.commits: |
|
773 | 773 | raise error.Abort(_(b'svn: revision %s not found') % revnum) |
|
774 | 774 | revcommit = self.commits[rev] |
|
775 | 775 | # caller caches the result, so free it here to release memory |
|
776 | 776 | del self.commits[rev] |
|
777 | 777 | return revcommit |
|
778 | 778 | |
|
779 | 779 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
780 | 780 | """fails if revision format does not match the correct format""" |
|
781 | 781 | if not re.match( |
|
782 | 782 | br'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-' |
|
783 | 783 | br'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]' |
|
784 | 784 | br'{12,12}(.*)@[0-9]+$', |
|
785 | 785 | revstr, |
|
786 | 786 | ): |
|
787 | 787 | raise error.Abort( |
|
788 | 788 | _(b'%s entry %s is not a valid revision identifier') |
|
789 | 789 | % (mapname, revstr) |
|
790 | 790 | ) |
|
791 | 791 | |
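# Annotation (not part of the changeset): the regex above accepts the
# revid() format, e.g. (made-up UUID):
#
#   svn:8af66a51-67f5-4354-b62c-98d67cc7be1d/trunk@42   -> accepted
#   svn:8af66a51@42                                     -> rejected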
|
792 | 792 | def numcommits(self): |
|
793 | 793 | return int(self.head.rsplit(b'@', 1)[1]) - self.startrev |
|
794 | 794 | |
|
795 | 795 | def gettags(self): |
|
796 | 796 | tags = {} |
|
797 | 797 | if self.tags is None: |
|
798 | 798 | return tags |
|
799 | 799 | |
|
800 | 800 | # svn tags are just a convention, project branches left in a |
|
801 | 801 | # 'tags' directory. There is no other relationship than |
|
802 | 802 | # ancestry, which is expensive to discover and makes them hard |
|
803 | 803 | # to update incrementally. Worse, past revisions may be |
|
804 | 804 | # referenced by tags far away in the future, requiring a deep |
|
805 | 805 | # history traversal on every calculation. Current code |
|
806 | 806 | # performs a single backward traversal, tracking moves within |
|
807 | 807 | # the tags directory (tag renaming) and recording a new tag |
|
808 | 808 | # everytime a project is copied from outside the tags |
|
809 | 809 | # directory. It also lists deleted tags, this behaviour may |
|
810 | 810 | # change in the future. |
|
811 | 811 | pendings = [] |
|
812 | 812 | tagspath = self.tags |
|
813 | 813 | start = svn.ra.get_latest_revnum(self.ra) |
|
814 | 814 | stream = self._getlog([self.tags], start, self.startrev) |
|
815 | 815 | try: |
|
816 | 816 | for entry in stream: |
|
817 | 817 | origpaths, revnum, author, date, message = entry |
|
818 | 818 | if not origpaths: |
|
819 | 819 | origpaths = [] |
|
820 | 820 | copies = [ |
|
821 | 821 | (e.copyfrom_path, e.copyfrom_rev, p) |
|
822 | | for p, e in
|
| 822 | for p, e in origpaths.items()
|
823 | 823 | if e.copyfrom_path |
|
824 | 824 | ] |
|
825 | 825 | # Apply moves/copies from more specific to general |
|
826 | 826 | copies.sort(reverse=True) |
|
827 | 827 | |
|
828 | 828 | srctagspath = tagspath |
|
829 | 829 | if copies and copies[-1][2] == tagspath: |
|
830 | 830 | # Track tags directory moves |
|
831 | 831 | srctagspath = copies.pop()[0] |
|
832 | 832 | |
|
833 | 833 | for source, sourcerev, dest in copies: |
|
834 | 834 | if not dest.startswith(tagspath + b'/'): |
|
835 | 835 | continue |
|
836 | 836 | for tag in pendings: |
|
837 | 837 | if tag[0].startswith(dest): |
|
838 | 838 | tagpath = source + tag[0][len(dest) :] |
|
839 | 839 | tag[:2] = [tagpath, sourcerev] |
|
840 | 840 | break |
|
841 | 841 | else: |
|
842 | 842 | pendings.append([source, sourcerev, dest]) |
|
843 | 843 | |
|
844 | 844 | # Filter out tags with children coming from different |
|
845 | 845 | # parts of the repository like: |
|
846 | 846 | # /tags/tag.1 (from /trunk:10) |
|
847 | 847 | # /tags/tag.1/foo (from /branches/foo:12) |
|
848 | 848 | # Here /tags/tag.1 is discarded, as well as its children.
|
849 | 849 | # It happens with tools like cvs2svn. Such tags cannot |
|
850 | 850 | # be represented in mercurial. |
|
851 | 851 | addeds = { |
|
852 | 852 | p: e.copyfrom_path |
|
853 | | for p, e in
|
| 853 | for p, e in origpaths.items()
|
854 | 854 | if e.action == b'A' and e.copyfrom_path |
|
855 | 855 | } |
|
856 | 856 | badroots = set() |
|
857 | 857 | for destroot in addeds: |
|
858 | 858 | for source, sourcerev, dest in pendings: |
|
859 | 859 | if not dest.startswith( |
|
860 | 860 | destroot + b'/' |
|
861 | 861 | ) or source.startswith(addeds[destroot] + b'/'): |
|
862 | 862 | continue |
|
863 | 863 | badroots.add(destroot) |
|
864 | 864 | break |
|
865 | 865 | |
|
866 | 866 | for badroot in badroots: |
|
867 | 867 | pendings = [ |
|
868 | 868 | p |
|
869 | 869 | for p in pendings |
|
870 | 870 | if p[2] != badroot |
|
871 | 871 | and not p[2].startswith(badroot + b'/') |
|
872 | 872 | ] |
|
873 | 873 | |
|
874 | 874 | # Tell tag renamings from tag creations |
|
875 | 875 | renamings = [] |
|
876 | 876 | for source, sourcerev, dest in pendings: |
|
877 | 877 | tagname = dest.split(b'/')[-1] |
|
878 | 878 | if source.startswith(srctagspath): |
|
879 | 879 | renamings.append([source, sourcerev, tagname]) |
|
880 | 880 | continue |
|
881 | 881 | if tagname in tags: |
|
882 | 882 | # Keep the latest tag value |
|
883 | 883 | continue |
|
884 | 884 | # The copyfrom revision may be fake, get one with changes
|
885 | 885 | try: |
|
886 | 886 | tagid = self.latest(source, sourcerev) |
|
887 | 887 | if tagid and tagname not in tags: |
|
888 | 888 | tags[tagname] = tagid |
|
889 | 889 | except SvnPathNotFound: |
|
890 | 890 | # It happens when we are following directories |
|
891 | 891 | # we assumed were copied with their parents |
|
892 | 892 | # but were really created in the tag |
|
893 | 893 | # directory. |
|
894 | 894 | pass |
|
895 | 895 | pendings = renamings |
|
896 | 896 | tagspath = srctagspath |
|
897 | 897 | finally: |
|
898 | 898 | stream.close() |
|
899 | 899 | return tags |
|
900 | 900 | |
|
901 | 901 | def converted(self, rev, destrev): |
|
902 | 902 | if not self.wc: |
|
903 | 903 | return |
|
904 | 904 | if self.convertfp is None: |
|
905 | 905 | self.convertfp = open( |
|
906 | 906 | os.path.join(self.wc, b'.svn', b'hg-shamap'), b'ab' |
|
907 | 907 | ) |
|
908 | 908 | self.convertfp.write( |
|
909 | 909 | util.tonativeeol(b'%s %d\n' % (destrev, self.revnum(rev))) |
|
910 | 910 | ) |
|
911 | 911 | self.convertfp.flush() |
|
912 | 912 | |
|
913 | 913 | def revid(self, revnum, module=None): |
|
914 | 914 | return b'svn:%s%s@%d' % (self.uuid, module or self.module, revnum) |
|
915 | 915 | |
|
916 | 916 | def revnum(self, rev): |
|
917 | 917 | return int(rev.split(b'@')[-1]) |
|
918 | 918 | |
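# Annotation (not part of the changeset): revid()/revnum() sketch with
# made-up values; uuid and module are instance state:
#
#   self.revid(42)                        # -> b'svn:<uuid>/trunk@42'
#   self.revnum(b'svn:<uuid>/trunk@42')   # -> 42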
|
919 | 919 | def latest(self, path, stop=None): |
|
920 | 920 | """Find the latest revid affecting path, up to stop revision |
|
921 | 921 | number. If stop is None, default to repository latest |
|
922 | 922 | revision. It may return a revision in a different module, |
|
923 | 923 | since a branch may be moved without a change being |
|
924 | 924 | reported. Return None if computed module does not belong to |
|
925 | 925 | rootmodule subtree. |
|
926 | 926 | """ |
|
927 | 927 | |
|
928 | 928 | def findchanges(path, start, stop=None): |
|
929 | 929 | stream = self._getlog([path], start, stop or 1) |
|
930 | 930 | try: |
|
931 | 931 | for entry in stream: |
|
932 | 932 | paths, revnum, author, date, message = entry |
|
933 | 933 | if stop is None and paths: |
|
934 | 934 | # We do not know the latest changed revision, |
|
935 | 935 | # keep the first one with changed paths. |
|
936 | 936 | break |
|
937 | 937 | if stop is not None and revnum <= stop: |
|
938 | 938 | break |
|
939 | 939 | |
|
940 | 940 | for p in paths: |
|
941 | 941 | if not path.startswith(p) or not paths[p].copyfrom_path: |
|
942 | 942 | continue |
|
943 | 943 | newpath = paths[p].copyfrom_path + path[len(p) :] |
|
944 | 944 | self.ui.debug( |
|
945 | 945 | b"branch renamed from %s to %s at %d\n" |
|
946 | 946 | % (path, newpath, revnum) |
|
947 | 947 | ) |
|
948 | 948 | path = newpath |
|
949 | 949 | break |
|
950 | 950 | if not paths: |
|
951 | 951 | revnum = None |
|
952 | 952 | return revnum, path |
|
953 | 953 | finally: |
|
954 | 954 | stream.close() |
|
955 | 955 | |
|
956 | 956 | if not path.startswith(self.rootmodule): |
|
957 | 957 | # Requests on foreign branches may be forbidden at server level |
|
958 | 958 | self.ui.debug(b'ignoring foreign branch %r\n' % path) |
|
959 | 959 | return None |
|
960 | 960 | |
|
961 | 961 | if stop is None: |
|
962 | 962 | stop = svn.ra.get_latest_revnum(self.ra) |
|
963 | 963 | try: |
|
964 | 964 | prevmodule = self.reparent(b'') |
|
965 | 965 | dirent = svn.ra.stat(self.ra, path.strip(b'/'), stop) |
|
966 | 966 | self.reparent(prevmodule) |
|
967 | 967 | except svn.core.SubversionException: |
|
968 | 968 | dirent = None |
|
969 | 969 | if not dirent: |
|
970 | 970 | raise SvnPathNotFound( |
|
971 | 971 | _(b'%s not found up to revision %d') % (path, stop) |
|
972 | 972 | ) |
|
973 | 973 | |
|
974 | 974 | # stat() gives us the previous revision on this line of |
|
975 | 975 | # development, but it might be in *another module*. Fetch the |
|
976 | 976 | # log and detect renames down to the latest revision. |
|
977 | 977 | revnum, realpath = findchanges(path, stop, dirent.created_rev) |
|
978 | 978 | if revnum is None: |
|
979 | 979 | # Tools like svnsync can create empty revisions, when
|
980 | 980 | # synchronizing only a subtree for instance. These empty
|
981 | 981 | # revisions' created_rev still keeps its original value
|
982 | 982 | # despite all changes having disappeared and can be |
|
983 | 983 | # returned by ra.stat(), at least when stating the root |
|
984 | 984 | # module. In that case, do not trust created_rev and scan |
|
985 | 985 | # the whole history. |
|
986 | 986 | revnum, realpath = findchanges(path, stop) |
|
987 | 987 | if revnum is None: |
|
988 | 988 | self.ui.debug(b'ignoring empty branch %r\n' % realpath) |
|
989 | 989 | return None |
|
990 | 990 | |
|
991 | 991 | if not realpath.startswith(self.rootmodule): |
|
992 | 992 | self.ui.debug(b'ignoring foreign branch %r\n' % realpath) |
|
993 | 993 | return None |
|
994 | 994 | return self.revid(revnum, realpath) |
|
995 | 995 | |
|
996 | 996 | def reparent(self, module): |
|
997 | 997 | """Reparent the svn transport and return the previous parent.""" |
|
998 | 998 | if self.prevmodule == module: |
|
999 | 999 | return module |
|
1000 | 1000 | svnurl = self.baseurl + quote(module) |
|
1001 | 1001 | prevmodule = self.prevmodule |
|
1002 | 1002 | if prevmodule is None: |
|
1003 | 1003 | prevmodule = b'' |
|
1004 | 1004 | self.ui.debug(b"reparent to %s\n" % svnurl) |
|
1005 | 1005 | svn.ra.reparent(self.ra, svnurl) |
|
1006 | 1006 | self.prevmodule = module |
|
1007 | 1007 | return prevmodule |
|
1008 | 1008 | |
|
1009 | 1009 | def expandpaths(self, rev, paths, parents): |
|
1010 | 1010 | changed, removed = set(), set() |
|
1011 | 1011 | copies = {} |
|
1012 | 1012 | |
|
1013 | 1013 | new_module, revnum = revsplit(rev)[1:] |
|
1014 | 1014 | if new_module != self.module: |
|
1015 | 1015 | self.module = new_module |
|
1016 | 1016 | self.reparent(self.module) |
|
1017 | 1017 | |
|
1018 | 1018 | progress = self.ui.makeprogress( |
|
1019 | 1019 | _(b'scanning paths'), unit=_(b'paths'), total=len(paths) |
|
1020 | 1020 | ) |
|
1021 | 1021 | for i, (path, ent) in enumerate(paths): |
|
1022 | 1022 | progress.update(i, item=path) |
|
1023 | 1023 | entrypath = self.getrelpath(path) |
|
1024 | 1024 | |
|
1025 | 1025 | kind = self._checkpath(entrypath, revnum) |
|
1026 | 1026 | if kind == svn.core.svn_node_file: |
|
1027 | 1027 | changed.add(self.recode(entrypath)) |
|
1028 | 1028 | if not ent.copyfrom_path or not parents: |
|
1029 | 1029 | continue |
|
1030 | 1030 | # Copy sources not in parent revisions cannot be |
|
1031 | 1031 | # represented, ignore their origin for now |
|
1032 | 1032 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
1033 | 1033 | if ent.copyfrom_rev < prevnum: |
|
1034 | 1034 | continue |
|
1035 | 1035 | copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule) |
|
1036 | 1036 | if not copyfrom_path: |
|
1037 | 1037 | continue |
|
1038 | 1038 | self.ui.debug( |
|
1039 | 1039 | b"copied to %s from %s@%d\n" |
|
1040 | 1040 | % (entrypath, copyfrom_path, ent.copyfrom_rev) |
|
1041 | 1041 | ) |
|
1042 | 1042 | copies[self.recode(entrypath)] = self.recode(copyfrom_path) |
|
1043 | 1043 | elif kind == 0: # gone, but had better be a deleted *file* |
|
1044 | 1044 | self.ui.debug(b"gone from %d\n" % ent.copyfrom_rev) |
|
1045 | 1045 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
1046 | 1046 | parentpath = pmodule + b"/" + entrypath |
|
1047 | 1047 | fromkind = self._checkpath(entrypath, prevnum, pmodule) |
|
1048 | 1048 | |
|
1049 | 1049 | if fromkind == svn.core.svn_node_file: |
|
1050 | 1050 | removed.add(self.recode(entrypath)) |
|
1051 | 1051 | elif fromkind == svn.core.svn_node_dir: |
|
1052 | 1052 | oroot = parentpath.strip(b'/') |
|
1053 | 1053 | nroot = path.strip(b'/') |
|
1054 | 1054 | children = self._iterfiles(oroot, prevnum) |
|
1055 | 1055 | for childpath in children: |
|
1056 | 1056 | childpath = childpath.replace(oroot, nroot) |
|
1057 | 1057 | childpath = self.getrelpath(b"/" + childpath, pmodule) |
|
1058 | 1058 | if childpath: |
|
1059 | 1059 | removed.add(self.recode(childpath)) |
|
1060 | 1060 | else: |
|
1061 | 1061 | self.ui.debug( |
|
1062 | 1062 | b'unknown path in revision %d: %s\n' % (revnum, path) |
|
1063 | 1063 | ) |
|
1064 | 1064 | elif kind == svn.core.svn_node_dir: |
|
1065 | 1065 | if ent.action == b'M': |
|
1066 | 1066 | # If the directory just had a prop change, |
|
1067 | 1067 | # then we shouldn't need to look for its children. |
|
1068 | 1068 | continue |
|
1069 | 1069 | if ent.action == b'R' and parents: |
|
1070 | 1070 | # If a directory is replacing a file, mark the previous |
|
1071 | 1071 | # file as deleted |
|
1072 | 1072 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
1073 | 1073 | pkind = self._checkpath(entrypath, prevnum, pmodule) |
|
1074 | 1074 | if pkind == svn.core.svn_node_file: |
|
1075 | 1075 | removed.add(self.recode(entrypath)) |
|
1076 | 1076 | elif pkind == svn.core.svn_node_dir: |
|
1077 | 1077 | # We do not know what files were kept or removed, |
|
1078 | 1078 | # mark them all as changed. |
|
1079 | 1079 | for childpath in self._iterfiles(pmodule, prevnum): |
|
1080 | 1080 | childpath = self.getrelpath(b"/" + childpath) |
|
1081 | 1081 | if childpath: |
|
1082 | 1082 | changed.add(self.recode(childpath)) |
|
1083 | 1083 | |
|
1084 | 1084 | for childpath in self._iterfiles(path, revnum): |
|
1085 | 1085 | childpath = self.getrelpath(b"/" + childpath) |
|
1086 | 1086 | if childpath: |
|
1087 | 1087 | changed.add(self.recode(childpath)) |
|
1088 | 1088 | |
|
1089 | 1089 | # Handle directory copies |
|
1090 | 1090 | if not ent.copyfrom_path or not parents: |
|
1091 | 1091 | continue |
|
1092 | 1092 | # Copy sources not in parent revisions cannot be |
|
1093 | 1093 | # represented, ignore their origin for now |
|
1094 | 1094 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
1095 | 1095 | if ent.copyfrom_rev < prevnum: |
|
1096 | 1096 | continue |
|
1097 | 1097 | copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule) |
|
1098 | 1098 | if not copyfrompath: |
|
1099 | 1099 | continue |
|
1100 | 1100 | self.ui.debug( |
|
1101 | 1101 | b"mark %s came from %s:%d\n" |
|
1102 | 1102 | % (path, copyfrompath, ent.copyfrom_rev) |
|
1103 | 1103 | ) |
|
1104 | 1104 | children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev) |
|
1105 | 1105 | for childpath in children: |
|
1106 | 1106 | childpath = self.getrelpath(b"/" + childpath, pmodule) |
|
1107 | 1107 | if not childpath: |
|
1108 | 1108 | continue |
|
1109 | 1109 | copytopath = path + childpath[len(copyfrompath) :] |
|
1110 | 1110 | copytopath = self.getrelpath(copytopath) |
|
1111 | 1111 | copies[self.recode(copytopath)] = self.recode(childpath) |
|
1112 | 1112 | |
|
1113 | 1113 | progress.complete() |
|
1114 | 1114 | changed.update(removed) |
|
1115 | 1115 | return (list(changed), removed, copies) |
|
1116 | 1116 | |
|
1117 | 1117 | def _fetch_revisions(self, from_revnum, to_revnum): |
|
1118 | 1118 | if from_revnum < to_revnum: |
|
1119 | 1119 | from_revnum, to_revnum = to_revnum, from_revnum |
|
1120 | 1120 | |
|
1121 | 1121 | self.child_cset = None |
|
1122 | 1122 | |
|
1123 | 1123 | def parselogentry(orig_paths, revnum, author, date, message): |
|
1124 | 1124 | """Return the parsed commit object or None, and True if |
|
1125 | 1125 | the revision is a branch root. |
|
1126 | 1126 | """ |
|
1127 | 1127 | self.ui.debug( |
|
1128 | 1128 | b"parsing revision %d (%d changes)\n" |
|
1129 | 1129 | % (revnum, len(orig_paths)) |
|
1130 | 1130 | ) |
|
1131 | 1131 | |
|
1132 | 1132 | branched = False |
|
1133 | 1133 | rev = self.revid(revnum) |
|
1134 | 1134 | # branch log might return entries for a parent we already have |
|
1135 | 1135 | |
|
1136 | 1136 | if rev in self.commits or revnum < to_revnum: |
|
1137 | 1137 | return None, branched |
|
1138 | 1138 | |
|
1139 | 1139 | parents = [] |
|
1140 | 1140 | # check whether this revision is the start of a branch or part |
|
1141 | 1141 | # of a branch renaming |
|
1142 | | orig_paths = sorted(
|
| 1142 | orig_paths = sorted(orig_paths.items())
|
1143 | 1143 | root_paths = [ |
|
1144 | 1144 | (p, e) for p, e in orig_paths if self.module.startswith(p) |
|
1145 | 1145 | ] |
|
1146 | 1146 | if root_paths: |
|
1147 | 1147 | path, ent = root_paths[-1] |
|
1148 | 1148 | if ent.copyfrom_path: |
|
1149 | 1149 | branched = True |
|
1150 | 1150 | newpath = ent.copyfrom_path + self.module[len(path) :] |
|
1151 | 1151 | # ent.copyfrom_rev may not be the actual last revision |
|
1152 | 1152 | previd = self.latest(newpath, ent.copyfrom_rev) |
|
1153 | 1153 | if previd is not None: |
|
1154 | 1154 | prevmodule, prevnum = revsplit(previd)[1:] |
|
1155 | 1155 | if prevnum >= self.startrev: |
|
1156 | 1156 | parents = [previd] |
|
1157 | 1157 | self.ui.note( |
|
1158 | 1158 | _(b'found parent of branch %s at %d: %s\n') |
|
1159 | 1159 | % (self.module, prevnum, prevmodule) |
|
1160 | 1160 | ) |
|
1161 | 1161 | else: |
|
1162 | 1162 | self.ui.debug(b"no copyfrom path, don't know what to do.\n") |
|
1163 | 1163 | |
|
1164 | 1164 | paths = [] |
|
1165 | 1165 | # filter out unrelated paths |
|
1166 | 1166 | for path, ent in orig_paths: |
|
1167 | 1167 | if self.getrelpath(path) is None: |
|
1168 | 1168 | continue |
|
1169 | 1169 | paths.append((path, ent)) |
|
1170 | 1170 | |
|
1171 | 1171 | date = parsesvndate(date) |
|
1172 | 1172 | if self.ui.configbool(b'convert', b'localtimezone'): |
|
1173 | 1173 | date = makedatetimestamp(date[0]) |
|
1174 | 1174 | |
|
1175 | 1175 | if message: |
|
1176 | 1176 | log = self.recode(message) |
|
1177 | 1177 | else: |
|
1178 | 1178 | log = b'' |
|
1179 | 1179 | |
|
1180 | 1180 | if author: |
|
1181 | 1181 | author = self.recode(author) |
|
1182 | 1182 | else: |
|
1183 | 1183 | author = b'' |
|
1184 | 1184 | |
|
1185 | 1185 | try: |
|
1186 | 1186 | branch = self.module.split(b"/")[-1] |
|
1187 | 1187 | if branch == self.trunkname: |
|
1188 | 1188 | branch = None |
|
1189 | 1189 | except IndexError: |
|
1190 | 1190 | branch = None |
|
1191 | 1191 | |
|
1192 | 1192 | cset = commit( |
|
1193 | 1193 | author=author, |
|
1194 | 1194 | date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'), |
|
1195 | 1195 | desc=log, |
|
1196 | 1196 | parents=parents, |
|
1197 | 1197 | branch=branch, |
|
1198 | 1198 | rev=rev, |
|
1199 | 1199 | ) |
|
1200 | 1200 | |
|
1201 | 1201 | self.commits[rev] = cset |
|
1202 | 1202 | # The parents list is *shared* among self.paths and the |
|
1203 | 1203 | # commit object. Both will be updated below. |
|
1204 | 1204 | self.paths[rev] = (paths, cset.parents) |
|
1205 | 1205 | if self.child_cset and not self.child_cset.parents: |
|
1206 | 1206 | self.child_cset.parents[:] = [rev] |
|
1207 | 1207 | self.child_cset = cset |
|
1208 | 1208 | return cset, branched |
|
1209 | 1209 | |
|
1210 | 1210 | self.ui.note( |
|
1211 | 1211 | _(b'fetching revision log for "%s" from %d to %d\n') |
|
1212 | 1212 | % (self.module, from_revnum, to_revnum) |
|
1213 | 1213 | ) |
|
1214 | 1214 | |
|
1215 | 1215 | try: |
|
1216 | 1216 | firstcset = None |
|
1217 | 1217 | lastonbranch = False |
|
1218 | 1218 | stream = self._getlog([self.module], from_revnum, to_revnum) |
|
1219 | 1219 | try: |
|
1220 | 1220 | for entry in stream: |
|
1221 | 1221 | paths, revnum, author, date, message = entry |
|
1222 | 1222 | if revnum < self.startrev: |
|
1223 | 1223 | lastonbranch = True |
|
1224 | 1224 | break |
|
1225 | 1225 | if not paths: |
|
1226 | 1226 | self.ui.debug(b'revision %d has no entries\n' % revnum) |
|
1227 | 1227 | # If we ever leave the loop on an empty |
|
1228 | 1228 | # revision, do not try to get a parent branch |
|
1229 | 1229 | lastonbranch = lastonbranch or revnum == 0 |
|
1230 | 1230 | continue |
|
1231 | 1231 | cset, lastonbranch = parselogentry( |
|
1232 | 1232 | paths, revnum, author, date, message |
|
1233 | 1233 | ) |
|
1234 | 1234 | if cset: |
|
1235 | 1235 | firstcset = cset |
|
1236 | 1236 | if lastonbranch: |
|
1237 | 1237 | break |
|
1238 | 1238 | finally: |
|
1239 | 1239 | stream.close() |
|
1240 | 1240 | |
|
1241 | 1241 | if not lastonbranch and firstcset and not firstcset.parents: |
|
1242 | 1242 | # The first revision of the sequence (the last fetched one) |
|
1243 | 1243 | # has invalid parents if not a branch root. Find the parent |
|
1244 | 1244 | # revision now, if any. |
|
1245 | 1245 | try: |
|
1246 | 1246 | firstrevnum = self.revnum(firstcset.rev) |
|
1247 | 1247 | if firstrevnum > 1: |
|
1248 | 1248 | latest = self.latest(self.module, firstrevnum - 1) |
|
1249 | 1249 | if latest: |
|
1250 | 1250 | firstcset.parents.append(latest) |
|
1251 | 1251 | except SvnPathNotFound: |
|
1252 | 1252 | pass |
|
1253 | 1253 | except svn.core.SubversionException as xxx_todo_changeme: |
|
1254 | 1254 | (inst, num) = xxx_todo_changeme.args |
|
1255 | 1255 | if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION: |
|
1256 | 1256 | raise error.Abort( |
|
1257 | 1257 | _(b'svn: branch has no revision %s') % to_revnum |
|
1258 | 1258 | ) |
|
1259 | 1259 | raise |
|
1260 | 1260 | |
|
1261 | 1261 | def getfile(self, file, rev): |
|
1262 | 1262 | # TODO: ra.get_file transmits the whole file instead of diffs. |
|
1263 | 1263 | if file in self.removed: |
|
1264 | 1264 | return None, None |
|
1265 | 1265 | try: |
|
1266 | 1266 | new_module, revnum = revsplit(rev)[1:] |
|
1267 | 1267 | if self.module != new_module: |
|
1268 | 1268 | self.module = new_module |
|
1269 | 1269 | self.reparent(self.module) |
|
1270 | 1270 | io = stringio() |
|
1271 | 1271 | info = svn.ra.get_file(self.ra, file, revnum, io) |
|
1272 | 1272 | data = io.getvalue() |
|
1273 | 1273 | # ra.get_file() seems to keep a reference on the input buffer |
|
1274 | 1274 | # preventing collection. Release it explicitly. |
|
1275 | 1275 | io.close() |
|
1276 | 1276 | if isinstance(info, list): |
|
1277 | 1277 | info = info[-1] |
|
1278 | 1278 | mode = (b"svn:executable" in info) and b'x' or b'' |
|
1279 | 1279 | mode = (b"svn:special" in info) and b'l' or mode |
|
1280 | 1280 | except svn.core.SubversionException as e: |
|
1281 | 1281 | notfound = ( |
|
1282 | 1282 | svn.core.SVN_ERR_FS_NOT_FOUND, |
|
1283 | 1283 | svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND, |
|
1284 | 1284 | ) |
|
1285 | 1285 | if e.apr_err in notfound: # File not found |
|
1286 | 1286 | return None, None |
|
1287 | 1287 | raise |
|
1288 | 1288 | if mode == b'l': |
|
1289 | 1289 | link_prefix = b"link " |
|
1290 | 1290 | if data.startswith(link_prefix): |
|
1291 | 1291 | data = data[len(link_prefix) :] |
|
1292 | 1292 | return data, mode |
|
1293 | 1293 | |
|
1294 | 1294 | def _iterfiles(self, path, revnum): |
|
1295 | 1295 | """Enumerate all files in path at revnum, recursively.""" |
|
1296 | 1296 | path = path.strip(b'/') |
|
1297 | 1297 | pool = svn.core.Pool() |
|
1298 | 1298 | rpath = b'/'.join([self.baseurl, quote(path)]).strip(b'/') |
|
1299 | 1299 | entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool) |
|
1300 | 1300 | if path: |
|
1301 | 1301 | path += b'/' |
|
1302 | 1302 | return ( |
|
1303 | 1303 | (path + p) |
|
1304 | | for p, e in
|
| 1304 | for p, e in entries.items()
|
1305 | 1305 | if e.kind == svn.core.svn_node_file |
|
1306 | 1306 | ) |
|
1307 | 1307 | |
|
1308 | 1308 | def getrelpath(self, path, module=None): |
|
1309 | 1309 | if module is None: |
|
1310 | 1310 | module = self.module |
|
1311 | 1311 | # Given the repository url of this wc, say |
|
1312 | 1312 | # "http://server/plone/CMFPlone/branches/Plone-2_0-branch" |
|
1313 | 1313 | # extract the "entry" portion (a relative path) from what |
|
1314 | 1314 | # svn log --xml says, i.e. |
|
1315 | 1315 | # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py" |
|
1316 | 1316 | # that is to say "tests/PloneTestCase.py" |
|
1317 | 1317 | if path.startswith(module): |
|
1318 | 1318 | relative = path.rstrip(b'/')[len(module) :] |
|
1319 | 1319 | if relative.startswith(b'/'): |
|
1320 | 1320 | return relative[1:] |
|
1321 | 1321 | elif relative == b'': |
|
1322 | 1322 | return relative |
|
1323 | 1323 | |
|
1324 | 1324 | # The path is outside our tracked tree... |
|
1325 | 1325 | self.ui.debug( |
|
1326 | 1326 | b'%r is not under %r, ignoring\n' |
|
1327 | 1327 | % (pycompat.bytestr(path), pycompat.bytestr(module)) |
|
1328 | 1328 | ) |
|
1329 | 1329 | return None |
|
1330 | 1330 | |
|
1331 | 1331 | def _checkpath(self, path, revnum, module=None): |
|
1332 | 1332 | if module is not None: |
|
1333 | 1333 | prevmodule = self.reparent(b'') |
|
1334 | 1334 | path = module + b'/' + path |
|
1335 | 1335 | try: |
|
1336 | 1336 | # ra.check_path does not like leading slashes very much, it leads |
|
1337 | 1337 | # to PROPFIND subversion errors |
|
1338 | 1338 | return svn.ra.check_path(self.ra, path.strip(b'/'), revnum) |
|
1339 | 1339 | finally: |
|
1340 | 1340 | if module is not None: |
|
1341 | 1341 | self.reparent(prevmodule) |
|
1342 | 1342 | |
|
1343 | 1343 | def _getlog( |
|
1344 | 1344 | self, |
|
1345 | 1345 | paths, |
|
1346 | 1346 | start, |
|
1347 | 1347 | end, |
|
1348 | 1348 | limit=0, |
|
1349 | 1349 | discover_changed_paths=True, |
|
1350 | 1350 | strict_node_history=False, |
|
1351 | 1351 | ): |
|
1352 | 1352 | # Normalize path names, svn >= 1.5 only wants paths relative to |
|
1353 | 1353 | # supplied URL |
|
1354 | 1354 | relpaths = [] |
|
1355 | 1355 | for p in paths: |
|
1356 | 1356 | if not p.startswith(b'/'): |
|
1357 | 1357 | p = self.module + b'/' + p |
|
1358 | 1358 | relpaths.append(p.strip(b'/')) |
|
1359 | 1359 | args = [ |
|
1360 | 1360 | self.baseurl, |
|
1361 | 1361 | relpaths, |
|
1362 | 1362 | start, |
|
1363 | 1363 | end, |
|
1364 | 1364 | limit, |
|
1365 | 1365 | discover_changed_paths, |
|
1366 | 1366 | strict_node_history, |
|
1367 | 1367 | ] |
|
1368 | 1368 | # developer config: convert.svn.debugsvnlog |
|
1369 | 1369 | if not self.ui.configbool(b'convert', b'svn.debugsvnlog'): |
|
1370 | 1370 | return directlogstream(*args) |
|
1371 | 1371 | arg = encodeargs(args) |
|
1372 | 1372 | hgexe = procutil.hgexecutable() |
|
1373 | 1373 | cmd = b'%s debugsvnlog' % procutil.shellquote(hgexe) |
|
1374 | 1374 | stdin, stdout = procutil.popen2(cmd) |
|
1375 | 1375 | stdin.write(arg) |
|
1376 | 1376 | try: |
|
1377 | 1377 | stdin.close() |
|
1378 | 1378 | except IOError: |
|
1379 | 1379 | raise error.Abort( |
|
1380 | 1380 | _( |
|
1381 | 1381 | b'Mercurial failed to run itself, check' |
|
1382 | 1382 | b' hg executable is in PATH' |
|
1383 | 1383 | ) |
|
1384 | 1384 | ) |
|
1385 | 1385 | return logstream(stdout) |
|
1386 | 1386 | |
|
1387 | 1387 | |
|
1388 | 1388 | pre_revprop_change_template = b'''#!/bin/sh |
|
1389 | 1389 | |
|
1390 | 1390 | REPOS="$1" |
|
1391 | 1391 | REV="$2" |
|
1392 | 1392 | USER="$3" |
|
1393 | 1393 | PROPNAME="$4" |
|
1394 | 1394 | ACTION="$5" |
|
1395 | 1395 | |
|
1396 | 1396 | %(rules)s |
|
1397 | 1397 | |
|
1398 | 1398 | echo "Changing prohibited revision property" >&2 |
|
1399 | 1399 | exit 1 |
|
1400 | 1400 | ''' |
|
1401 | 1401 | |
|
1402 | 1402 | |
|
1403 | 1403 | def gen_pre_revprop_change_hook(prop_actions_allowed): |
|
1404 | 1404 | rules = [] |
|
1405 | 1405 | for action, propname in prop_actions_allowed: |
|
1406 | 1406 | rules.append( |
|
1407 | 1407 | ( |
|
1408 | 1408 | b'if [ "$ACTION" = "%s" -a "$PROPNAME" = "%s" ]; ' |
|
1409 | 1409 | b'then exit 0; fi' |
|
1410 | 1410 | ) |
|
1411 | 1411 | % (action, propname) |
|
1412 | 1412 | ) |
|
1413 | 1413 | return pre_revprop_change_template % {b'rules': b'\n'.join(rules)} |
|
1414 | 1414 | |
|
1415 | 1415 | |
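A hedged sketch of what the generator above emits: each allowed (action, propname) pair becomes one shell rule spliced into the template, and everything else falls through to the rejection at the bottom.

    # illustrative call, not part of the source
    hook = gen_pre_revprop_change_hook([(b'M', b'svn:log')])
    # the returned script contains, besides the template boilerplate:
    #   if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
    #   echo "Changing prohibited revision property" >&2
    #   exit 1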
|
1416 | 1416 | class svn_sink(converter_sink, commandline): |
|
1417 | 1417 | commit_re = re.compile(br'Committed revision (\d+).', re.M) |
|
1418 | 1418 | uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M) |
|
1419 | 1419 | |
|
1420 | 1420 | def prerun(self): |
|
1421 | 1421 | if self.wc: |
|
1422 | 1422 | os.chdir(self.wc) |
|
1423 | 1423 | |
|
1424 | 1424 | def postrun(self): |
|
1425 | 1425 | if self.wc: |
|
1426 | 1426 | os.chdir(self.cwd) |
|
1427 | 1427 | |
|
1428 | 1428 | def join(self, name): |
|
1429 | 1429 | return os.path.join(self.wc, b'.svn', name) |
|
1430 | 1430 | |
|
1431 | 1431 | def revmapfile(self): |
|
1432 | 1432 | return self.join(b'hg-shamap') |
|
1433 | 1433 | |
|
1434 | 1434 | def authorfile(self): |
|
1435 | 1435 | return self.join(b'hg-authormap') |
|
1436 | 1436 | |
|
1437 | 1437 | def __init__(self, ui, repotype, path): |
|
1438 | 1438 | |
|
1439 | 1439 | converter_sink.__init__(self, ui, repotype, path) |
|
1440 | 1440 | commandline.__init__(self, ui, b'svn') |
|
1441 | 1441 | self.delete = [] |
|
1442 | 1442 | self.setexec = [] |
|
1443 | 1443 | self.delexec = [] |
|
1444 | 1444 | self.copies = [] |
|
1445 | 1445 | self.wc = None |
|
1446 | 1446 | self.cwd = encoding.getcwd() |
|
1447 | 1447 | |
|
1448 | 1448 | created = False |
|
1449 | 1449 | if os.path.isfile(os.path.join(path, b'.svn', b'entries')): |
|
1450 | 1450 | self.wc = os.path.realpath(path) |
|
1451 | 1451 | self.run0(b'update') |
|
1452 | 1452 | else: |
|
1453 | 1453 | if not re.search(br'^(file|http|https|svn|svn\+ssh)://', path): |
|
1454 | 1454 | path = os.path.realpath(path) |
|
1455 | 1455 | if os.path.isdir(os.path.dirname(path)): |
|
1456 | 1456 | if not os.path.exists( |
|
1457 | 1457 | os.path.join(path, b'db', b'fs-type') |
|
1458 | 1458 | ): |
|
1459 | 1459 | ui.status( |
|
1460 | 1460 | _(b"initializing svn repository '%s'\n") |
|
1461 | 1461 | % os.path.basename(path) |
|
1462 | 1462 | ) |
|
1463 | 1463 | commandline(ui, b'svnadmin').run0(b'create', path) |
|
1464 | 1464 | created = path |
|
1465 | 1465 | path = util.normpath(path) |
|
1466 | 1466 | if not path.startswith(b'/'): |
|
1467 | 1467 | path = b'/' + path |
|
1468 | 1468 | path = b'file://' + path |
|
1469 | 1469 | |
|
1470 | 1470 | wcpath = os.path.join( |
|
1471 | 1471 | encoding.getcwd(), os.path.basename(path) + b'-wc' |
|
1472 | 1472 | ) |
|
1473 | 1473 | ui.status( |
|
1474 | 1474 | _(b"initializing svn working copy '%s'\n") |
|
1475 | 1475 | % os.path.basename(wcpath) |
|
1476 | 1476 | ) |
|
1477 | 1477 | self.run0(b'checkout', path, wcpath) |
|
1478 | 1478 | |
|
1479 | 1479 | self.wc = wcpath |
|
1480 | 1480 | self.opener = vfsmod.vfs(self.wc) |
|
1481 | 1481 | self.wopener = vfsmod.vfs(self.wc) |
|
1482 | 1482 | self.childmap = mapfile(ui, self.join(b'hg-childmap')) |
|
1483 | 1483 | if util.checkexec(self.wc): |
|
1484 | 1484 | self.is_exec = util.isexec |
|
1485 | 1485 | else: |
|
1486 | 1486 | self.is_exec = None |
|
1487 | 1487 | |
|
1488 | 1488 | if created: |
|
1489 | 1489 | prop_actions_allowed = [ |
|
1490 | 1490 | (b'M', b'svn:log'), |
|
1491 | 1491 | (b'A', b'hg:convert-branch'), |
|
1492 | 1492 | (b'A', b'hg:convert-rev'), |
|
1493 | 1493 | ] |
|
1494 | 1494 | |
|
1495 | 1495 | if self.ui.configbool( |
|
1496 | 1496 | b'convert', b'svn.dangerous-set-commit-dates' |
|
1497 | 1497 | ): |
|
1498 | 1498 | prop_actions_allowed.append((b'M', b'svn:date')) |
|
1499 | 1499 | |
|
1500 | 1500 | hook = os.path.join(created, b'hooks', b'pre-revprop-change') |
|
1501 | 1501 | fp = open(hook, b'wb') |
|
1502 | 1502 | fp.write(gen_pre_revprop_change_hook(prop_actions_allowed)) |
|
1503 | 1503 | fp.close() |
|
1504 | 1504 | util.setflags(hook, False, True) |
|
1505 | 1505 | |
|
1506 | 1506 | output = self.run0(b'info') |
|
1507 | 1507 | self.uuid = self.uuid_re.search(output).group(1).strip() |
|
1508 | 1508 | |
|
1509 | 1509 | def wjoin(self, *names): |
|
1510 | 1510 | return os.path.join(self.wc, *names) |
|
1511 | 1511 | |
|
1512 | 1512 | @propertycache |
|
1513 | 1513 | def manifest(self): |
|
1514 | 1514 | # As of svn 1.7, the "add" command fails when receiving |
|
1515 | 1515 | # already tracked entries, so we have to track and filter them |
|
1516 | 1516 | # ourselves. |
|
1517 | 1517 | m = set() |
|
1518 | 1518 | output = self.run0(b'ls', recursive=True, xml=True) |
|
1519 | 1519 | doc = xml.dom.minidom.parseString(output) |
|
1520 | 1520 | for e in doc.getElementsByTagName('entry'): |
|
1521 | 1521 | for n in e.childNodes: |
|
1522 | 1522 | if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name': |
|
1523 | 1523 | continue |
|
1524 | 1524 | name = ''.join( |
|
1525 | 1525 | c.data for c in n.childNodes if c.nodeType == c.TEXT_NODE |
|
1526 | 1526 | ) |
|
1527 | 1527 | # Entries are compared with names coming from |
|
1528 | 1528 | # mercurial, so bytes with undefined encoding. Our |
|
1529 | 1529 | # best bet is to assume they are in local |
|
1530 | 1530 | # encoding. They will be passed to command line calls |
|
1531 | 1531 | # later anyway, so they better be. |
|
1532 | 1532 | m.add(encoding.unitolocal(name)) |
|
1533 | 1533 | break |
|
1534 | 1534 | return m |
|
1535 | 1535 | |
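For orientation, a sketch of the data the property above parses; the XML shape is an assumption based on typical `svn ls --xml` output, abridged:

    # output = self.run0(b'ls', recursive=True, xml=True) might look like:
    #   <lists><list path=".">
    #     <entry kind="file"><name>foo.c</name></entry>
    #     <entry kind="dir"><name>src</name></entry>
    #   </list></lists>
    # the loop keeps the text of every <name> element, so here
    # manifest == {b'foo.c', b'src'}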
|
1536 | 1536 | def putfile(self, filename, flags, data): |
|
1537 | 1537 | if b'l' in flags: |
|
1538 | 1538 | self.wopener.symlink(data, filename) |
|
1539 | 1539 | else: |
|
1540 | 1540 | try: |
|
1541 | 1541 | if os.path.islink(self.wjoin(filename)): |
|
1542 | 1542 | os.unlink(filename) |
|
1543 | 1543 | except OSError: |
|
1544 | 1544 | pass |
|
1545 | 1545 | |
|
1546 | 1546 | if self.is_exec: |
|
1547 | 1547 | # We need to check executability of the file before the change, |
|
1548 | 1548 | # because `vfs.write` is able to reset exec bit. |
|
1549 | 1549 | wasexec = False |
|
1550 | 1550 | if os.path.exists(self.wjoin(filename)): |
|
1551 | 1551 | wasexec = self.is_exec(self.wjoin(filename)) |
|
1552 | 1552 | |
|
1553 | 1553 | self.wopener.write(filename, data) |
|
1554 | 1554 | |
|
1555 | 1555 | if self.is_exec: |
|
1556 | 1556 | if wasexec: |
|
1557 | 1557 | if b'x' not in flags: |
|
1558 | 1558 | self.delexec.append(filename) |
|
1559 | 1559 | else: |
|
1560 | 1560 | if b'x' in flags: |
|
1561 | 1561 | self.setexec.append(filename) |
|
1562 | 1562 | util.setflags(self.wjoin(filename), False, b'x' in flags) |
|
1563 | 1563 | |
|
1564 | 1564 | def _copyfile(self, source, dest): |
|
1565 | 1565 | # SVN's copy command pukes if the destination file exists, but |
|
1566 | 1566 | # our copyfile method expects to record a copy that has |
|
1567 | 1567 | # already occurred. Cross the semantic gap. |
|
1568 | 1568 | wdest = self.wjoin(dest) |
|
1569 | 1569 | exists = os.path.lexists(wdest) |
|
1570 | 1570 | if exists: |
|
1571 | 1571 | fd, tempname = pycompat.mkstemp( |
|
1572 | 1572 | prefix=b'hg-copy-', dir=os.path.dirname(wdest) |
|
1573 | 1573 | ) |
|
1574 | 1574 | os.close(fd) |
|
1575 | 1575 | os.unlink(tempname) |
|
1576 | 1576 | os.rename(wdest, tempname) |
|
1577 | 1577 | try: |
|
1578 | 1578 | self.run0(b'copy', source, dest) |
|
1579 | 1579 | finally: |
|
1580 | 1580 | self.manifest.add(dest) |
|
1581 | 1581 | if exists: |
|
1582 | 1582 | try: |
|
1583 | 1583 | os.unlink(wdest) |
|
1584 | 1584 | except OSError: |
|
1585 | 1585 | pass |
|
1586 | 1586 | os.rename(tempname, wdest) |
|
1587 | 1587 | |
|
1588 | 1588 | def dirs_of(self, files): |
|
1589 | 1589 | dirs = set() |
|
1590 | 1590 | for f in files: |
|
1591 | 1591 | if os.path.isdir(self.wjoin(f)): |
|
1592 | 1592 | dirs.add(f) |
|
1593 | 1593 | i = len(f) |
|
1594 | 1594 | for i in iter(lambda: f.rfind(b'/', 0, i), -1): |
|
1595 | 1595 | dirs.add(f[:i]) |
|
1596 | 1596 | return dirs |
|
1597 | 1597 | |
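A worked example of the rfind loop above, which walks the '/' separators of each path right to left to collect ancestor directories:

    # dirs_of([b'a/b/c.txt']) when b'a/b/c.txt' is not a directory in the wc:
    #   i = 9; f.rfind(b'/', 0, 9) == 3 -> add b'a/b'
    #   i = 3; f.rfind(b'/', 0, 3) == 1 -> add b'a'
    #   i = 1; f.rfind(b'/', 0, 1) == -1 -> stop
    # result: {b'a/b', b'a'}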
|
1598 | 1598 | def add_dirs(self, files): |
|
1599 | 1599 | add_dirs = [ |
|
1600 | 1600 | d for d in sorted(self.dirs_of(files)) if d not in self.manifest |
|
1601 | 1601 | ] |
|
1602 | 1602 | if add_dirs: |
|
1603 | 1603 | self.manifest.update(add_dirs) |
|
1604 | 1604 | self.xargs(add_dirs, b'add', non_recursive=True, quiet=True) |
|
1605 | 1605 | return add_dirs |
|
1606 | 1606 | |
|
1607 | 1607 | def add_files(self, files): |
|
1608 | 1608 | files = [f for f in files if f not in self.manifest] |
|
1609 | 1609 | if files: |
|
1610 | 1610 | self.manifest.update(files) |
|
1611 | 1611 | self.xargs(files, b'add', quiet=True) |
|
1612 | 1612 | return files |
|
1613 | 1613 | |
|
1614 | 1614 | def addchild(self, parent, child): |
|
1615 | 1615 | self.childmap[parent] = child |
|
1616 | 1616 | |
|
1617 | 1617 | def revid(self, rev): |
|
1618 | 1618 | return b"svn:%s@%s" % (self.uuid, rev) |
|
1619 | 1619 | |
|
1620 | 1620 | def putcommit( |
|
1621 | 1621 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
1622 | 1622 | ): |
|
1623 | 1623 | for parent in parents: |
|
1624 | 1624 | try: |
|
1625 | 1625 | return self.revid(self.childmap[parent]) |
|
1626 | 1626 | except KeyError: |
|
1627 | 1627 | pass |
|
1628 | 1628 | |
|
1629 | 1629 | # Apply changes to working copy |
|
1630 | 1630 | for f, v in files: |
|
1631 | 1631 | data, mode = source.getfile(f, v) |
|
1632 | 1632 | if data is None: |
|
1633 | 1633 | self.delete.append(f) |
|
1634 | 1634 | else: |
|
1635 | 1635 | self.putfile(f, mode, data) |
|
1636 | 1636 | if f in copies: |
|
1637 | 1637 | self.copies.append([copies[f], f]) |
|
1638 | 1638 | if full: |
|
1639 | 1639 | self.delete.extend(sorted(self.manifest.difference(files))) |
|
1640 | 1640 | files = [f[0] for f in files] |
|
1641 | 1641 | |
|
1642 | 1642 | entries = set(self.delete) |
|
1643 | 1643 | files = frozenset(files) |
|
1644 | 1644 | entries.update(self.add_dirs(files.difference(entries))) |
|
1645 | 1645 | if self.copies: |
|
1646 | 1646 | for s, d in self.copies: |
|
1647 | 1647 | self._copyfile(s, d) |
|
1648 | 1648 | self.copies = [] |
|
1649 | 1649 | if self.delete: |
|
1650 | 1650 | self.xargs(self.delete, b'delete') |
|
1651 | 1651 | for f in self.delete: |
|
1652 | 1652 | self.manifest.remove(f) |
|
1653 | 1653 | self.delete = [] |
|
1654 | 1654 | entries.update(self.add_files(files.difference(entries))) |
|
1655 | 1655 | if self.delexec: |
|
1656 | 1656 | self.xargs(self.delexec, b'propdel', b'svn:executable') |
|
1657 | 1657 | self.delexec = [] |
|
1658 | 1658 | if self.setexec: |
|
1659 | 1659 | self.xargs(self.setexec, b'propset', b'svn:executable', b'*') |
|
1660 | 1660 | self.setexec = [] |
|
1661 | 1661 | |
|
1662 | 1662 | fd, messagefile = pycompat.mkstemp(prefix=b'hg-convert-') |
|
1663 | 1663 | fp = os.fdopen(fd, 'wb') |
|
1664 | 1664 | fp.write(util.tonativeeol(commit.desc)) |
|
1665 | 1665 | fp.close() |
|
1666 | 1666 | try: |
|
1667 | 1667 | output = self.run0( |
|
1668 | 1668 | b'commit', |
|
1669 | 1669 | username=stringutil.shortuser(commit.author), |
|
1670 | 1670 | file=messagefile, |
|
1671 | 1671 | encoding=b'utf-8', |
|
1672 | 1672 | ) |
|
1673 | 1673 | try: |
|
1674 | 1674 | rev = self.commit_re.search(output).group(1) |
|
1675 | 1675 | except AttributeError: |
|
1676 | 1676 | if not files: |
|
1677 | 1677 | return parents[0] if parents else b'None' |
|
1678 | 1678 | self.ui.warn(_(b'unexpected svn output:\n')) |
|
1679 | 1679 | self.ui.warn(output) |
|
1680 | 1680 | raise error.Abort(_(b'unable to cope with svn output')) |
|
1681 | 1681 | if commit.rev: |
|
1682 | 1682 | self.run( |
|
1683 | 1683 | b'propset', |
|
1684 | 1684 | b'hg:convert-rev', |
|
1685 | 1685 | commit.rev, |
|
1686 | 1686 | revprop=True, |
|
1687 | 1687 | revision=rev, |
|
1688 | 1688 | ) |
|
1689 | 1689 | if commit.branch and commit.branch != b'default': |
|
1690 | 1690 | self.run( |
|
1691 | 1691 | b'propset', |
|
1692 | 1692 | b'hg:convert-branch', |
|
1693 | 1693 | commit.branch, |
|
1694 | 1694 | revprop=True, |
|
1695 | 1695 | revision=rev, |
|
1696 | 1696 | ) |
|
1697 | 1697 | |
|
1698 | 1698 | if self.ui.configbool( |
|
1699 | 1699 | b'convert', b'svn.dangerous-set-commit-dates' |
|
1700 | 1700 | ): |
|
1701 | 1701 | # Subversion always uses UTC to represent date and time
|
1702 | 1702 | date = dateutil.parsedate(commit.date) |
|
1703 | 1703 | date = (date[0], 0) |
|
1704 | 1704 | |
|
1705 | 1705 | # The only way to set date and time for an svn commit is to use propset after the commit is done
|
1706 | 1706 | self.run( |
|
1707 | 1707 | b'propset', |
|
1708 | 1708 | b'svn:date', |
|
1709 | 1709 | formatsvndate(date), |
|
1710 | 1710 | revprop=True, |
|
1711 | 1711 | revision=rev, |
|
1712 | 1712 | ) |
|
1713 | 1713 | |
|
1714 | 1714 | for parent in parents: |
|
1715 | 1715 | self.addchild(parent, rev) |
|
1716 | 1716 | return self.revid(rev) |
|
1717 | 1717 | finally: |
|
1718 | 1718 | os.unlink(messagefile) |
|
1719 | 1719 | |
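The revprop calls in putcommit map onto plain `svn propset --revprop` invocations; a hedged CLI sketch of the svn:date branch (rev and timestamp are placeholders):

    # roughly equivalent command line, for illustration only:
    #   svn propset svn:date --revprop -r <rev> <utc-timestamp>
    # the timezone is zeroed first (date = (date[0], 0)) because Subversion
    # stores svn:date in UTC, and the pre-revprop-change hook written at
    # sink-creation time must allow ('M', 'svn:date') for this to succeed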
|
1720 | 1720 | def puttags(self, tags): |
|
1721 | 1721 | self.ui.warn(_(b'writing Subversion tags is not yet implemented\n')) |
|
1722 | 1722 | return None, None |
|
1723 | 1723 | |
|
1724 | 1724 | def hascommitfrommap(self, rev): |
|
1725 | 1725 | # We trust that revisions referenced in a map are still present
|
1726 | 1726 | # TODO: implement something better if necessary and feasible |
|
1727 | 1727 | return True |
|
1728 | 1728 | |
|
1729 | 1729 | def hascommitforsplicemap(self, rev): |
|
1730 | 1730 | # This is not correct as one can convert to an existing subversion |
|
1731 | 1731 | # repository and childmap would not list all revisions. Too bad. |
|
1732 | 1732 | if rev in self.childmap: |
|
1733 | 1733 | return True |
|
1734 | 1734 | raise error.Abort( |
|
1735 | 1735 | _( |
|
1736 | 1736 | b'splice map revision %s not found in subversion ' |
|
1737 | 1737 | b'child map (revision lookups are not implemented)' |
|
1738 | 1738 | ) |
|
1739 | 1739 | % rev |
|
1740 | 1740 | ) |
@@ -1,479 +1,479 b'' | |||
|
1 | 1 | """automatically manage newlines in repository files |
|
2 | 2 | |
|
3 | 3 | This extension allows you to manage the type of line endings (CRLF or |
|
4 | 4 | LF) that are used in the repository and in the local working |
|
5 | 5 | directory. That way you can get CRLF line endings on Windows and LF on |
|
6 | 6 | Unix/Mac, thereby letting everybody use their OS native line endings. |
|
7 | 7 | |
|
8 | 8 | The extension reads its configuration from a versioned ``.hgeol`` |
|
9 | 9 | configuration file found in the root of the working directory. The |
|
10 | 10 | ``.hgeol`` file uses the same syntax as all other Mercurial
|
11 | 11 | configuration files. It uses two sections, ``[patterns]`` and |
|
12 | 12 | ``[repository]``. |
|
13 | 13 | |
|
14 | 14 | The ``[patterns]`` section specifies how line endings should be |
|
15 | 15 | converted between the working directory and the repository. The format is |
|
16 | 16 | specified by a file pattern. The first match is used, so put more |
|
17 | 17 | specific patterns first. The available line endings are ``LF``, |
|
18 | 18 | ``CRLF``, and ``BIN``. |
|
19 | 19 | |
|
20 | 20 | Files with the declared format of ``CRLF`` or ``LF`` are always |
|
21 | 21 | checked out and stored in the repository in that format, and files
|
22 | 22 | declared to be binary (``BIN``) are left unchanged. Additionally, |
|
23 | 23 | ``native`` is an alias for checking out in the platform's default line |
|
24 | 24 | ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on |
|
25 | 25 | Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's |
|
26 | 26 | default behavior; it is only needed if you need to override a later, |
|
27 | 27 | more general pattern. |
|
28 | 28 | |
|
29 | 29 | The optional ``[repository]`` section specifies the line endings to |
|
30 | 30 | use for files stored in the repository. It has a single setting, |
|
31 | 31 | ``native``, which determines the storage line endings for files |
|
32 | 32 | declared as ``native`` in the ``[patterns]`` section. It can be set to |
|
33 | 33 | ``LF`` or ``CRLF``. The default is ``LF``. For example, this means |
|
34 | 34 | that on Windows, files configured as ``native`` (``CRLF`` by default) |
|
35 | 35 | will be converted to ``LF`` when stored in the repository. Files |
|
36 | 36 | declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section |
|
37 | 37 | are always stored as-is in the repository. |
|
38 | 38 | |
|
39 | 39 | Example versioned ``.hgeol`` file:: |
|
40 | 40 | |
|
41 | 41 | [patterns] |
|
42 | 42 | **.py = native |
|
43 | 43 | **.vcproj = CRLF |
|
44 | 44 | **.txt = native |
|
45 | 45 | Makefile = LF |
|
46 | 46 | **.jpg = BIN |
|
47 | 47 | |
|
48 | 48 | [repository] |
|
49 | 49 | native = LF |
|
50 | 50 | |
|
51 | 51 | .. note:: |
|
52 | 52 | |
|
53 | 53 | The rules will first apply when files are touched in the working |
|
54 | 54 | directory, e.g. by updating to null and back to tip to touch all files. |
|
55 | 55 | |
|
56 | 56 | The extension uses an optional ``[eol]`` section read from both the |
|
57 | 57 | normal Mercurial configuration files and the ``.hgeol`` file, with the |
|
58 | 58 | latter overriding the former. You can use that section to control the |
|
59 | 59 | overall behavior. There are three settings: |
|
60 | 60 | |
|
61 | 61 | - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or |
|
62 | 62 | ``CRLF`` to override the default interpretation of ``native`` for |
|
63 | 63 | checkout. This can be used with :hg:`archive` on Unix, say, to |
|
64 | 64 | generate an archive where files have line endings for Windows. |
|
65 | 65 | |
|
66 | 66 | - ``eol.only-consistent`` (default True) can be set to False to make |
|
67 | 67 | the extension convert files with inconsistent EOLs. Inconsistent |
|
68 | 68 | means that there is both ``CRLF`` and ``LF`` present in the file. |
|
69 | 69 | Such files are normally not touched under the assumption that they |
|
70 | 70 | have mixed EOLs on purpose. |
|
71 | 71 | |
|
72 | 72 | - ``eol.fix-trailing-newline`` (default False) can be set to True to |
|
73 | 73 | ensure that converted files end with an EOL character (either ``\\n``
|
74 | 74 | or ``\\r\\n`` as per the configured patterns). |
|
75 | 75 | |
|
76 | 76 | The extension provides ``cleverencode:`` and ``cleverdecode:`` filters |
|
77 | 77 | like the deprecated win32text extension does. This means that you can |
|
78 | 78 | disable win32text and enable eol and your filters will still work. You |
|
79 | 79 | only need to use these filters until you have prepared a ``.hgeol`` file.
|
80 | 80 | |
|
81 | 81 | The ``win32text.forbid*`` hooks provided by the win32text extension |
|
82 | 82 | have been unified into a single hook named ``eol.checkheadshook``. The |
|
83 | 83 | hook will look up the expected line endings from the ``.hgeol`` file,
|
84 | 84 | which means you must migrate to a ``.hgeol`` file first before using |
|
85 | 85 | the hook. ``eol.checkheadshook`` only checks heads; intermediate
|
86 | 86 | invalid revisions will be pushed. To forbid them completely, use the |
|
87 | 87 | ``eol.checkallhook`` hook. These hooks are best used as |
|
88 | 88 | ``pretxnchangegroup`` hooks. |
|
89 | 89 | |
|
90 | 90 | See :hg:`help patterns` for more information about the glob patterns |
|
91 | 91 | used. |
|
92 | 92 | """ |
|
93 | 93 | |
|
94 | 94 | |
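As a reference for the three ``[eol]`` settings documented above, a minimal hgrc sketch with every default overridden (the values are illustrative):

    [eol]
    native = CRLF
    only-consistent = False
    fix-trailing-newline = True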
|
95 | 95 | import os |
|
96 | 96 | import re |
|
97 | 97 | from mercurial.i18n import _ |
|
98 | 98 | from mercurial import ( |
|
99 | 99 | config, |
|
100 | 100 | error as errormod, |
|
101 | 101 | extensions, |
|
102 | 102 | match, |
|
103 | 103 | pycompat, |
|
104 | 104 | registrar, |
|
105 | 105 | scmutil, |
|
106 | 106 | util, |
|
107 | 107 | ) |
|
108 | 108 | from mercurial.utils import stringutil |
|
109 | 109 | |
|
110 | 110 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
111 | 111 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
112 | 112 | # be specifying the version(s) of Mercurial they are tested with, or |
|
113 | 113 | # leave the attribute unspecified. |
|
114 | 114 | testedwith = b'ships-with-hg-core' |
|
115 | 115 | |
|
116 | 116 | configtable = {} |
|
117 | 117 | configitem = registrar.configitem(configtable) |
|
118 | 118 | |
|
119 | 119 | configitem( |
|
120 | 120 | b'eol', |
|
121 | 121 | b'fix-trailing-newline', |
|
122 | 122 | default=False, |
|
123 | 123 | ) |
|
124 | 124 | configitem( |
|
125 | 125 | b'eol', |
|
126 | 126 | b'native', |
|
127 | 127 | default=pycompat.oslinesep, |
|
128 | 128 | ) |
|
129 | 129 | configitem( |
|
130 | 130 | b'eol', |
|
131 | 131 | b'only-consistent', |
|
132 | 132 | default=True, |
|
133 | 133 | ) |
|
134 | 134 | |
|
135 | 135 | # Matches a lone LF, i.e., one that is not part of CRLF. |
|
136 | 136 | singlelf = re.compile(b'(^|[^\r])\n') |
|
137 | 137 | |
|
138 | 138 | |
|
139 | 139 | def inconsistenteol(data): |
|
140 | 140 | return b'\r\n' in data and singlelf.search(data) |
|
141 | 141 | |
|
142 | 142 | |
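A small sketch of the two helpers above: a buffer is "inconsistent" when it contains CRLF and at least one LF that is not part of a CRLF pair.

    # illustrative behaviour, not part of the source
    assert inconsistenteol(b'one\r\ntwo\n')        # CRLF plus a lone LF
    assert not inconsistenteol(b'one\r\ntwo\r\n')  # CRLF only
    assert not inconsistenteol(b'one\ntwo\n')      # LF only, no CRLF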
|
143 | 143 | def tolf(s, params, ui, **kwargs): |
|
144 | 144 | """Filter to convert to LF EOLs.""" |
|
145 | 145 | if stringutil.binary(s): |
|
146 | 146 | return s |
|
147 | 147 | if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s): |
|
148 | 148 | return s |
|
149 | 149 | if ( |
|
150 | 150 | ui.configbool(b'eol', b'fix-trailing-newline') |
|
151 | 151 | and s |
|
152 | 152 | and not s.endswith(b'\n') |
|
153 | 153 | ): |
|
154 | 154 | s = s + b'\n' |
|
155 | 155 | return util.tolf(s) |
|
156 | 156 | |
|
157 | 157 | |
|
158 | 158 | def tocrlf(s, params, ui, **kwargs): |
|
159 | 159 | """Filter to convert to CRLF EOLs.""" |
|
160 | 160 | if stringutil.binary(s): |
|
161 | 161 | return s |
|
162 | 162 | if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s): |
|
163 | 163 | return s |
|
164 | 164 | if ( |
|
165 | 165 | ui.configbool(b'eol', b'fix-trailing-newline') |
|
166 | 166 | and s |
|
167 | 167 | and not s.endswith(b'\n') |
|
168 | 168 | ): |
|
169 | 169 | s = s + b'\n' |
|
170 | 170 | return util.tocrlf(s) |
|
171 | 171 | |
|
172 | 172 | |
|
173 | 173 | def isbinary(s, params, ui, **kwargs): |
|
174 | 174 | """Filter to do nothing with the file.""" |
|
175 | 175 | return s |
|
176 | 176 | |
|
177 | 177 | |
|
178 | 178 | filters = { |
|
179 | 179 | b'to-lf': tolf, |
|
180 | 180 | b'to-crlf': tocrlf, |
|
181 | 181 | b'is-binary': isbinary, |
|
182 | 182 | # The following provide backwards compatibility with win32text |
|
183 | 183 | b'cleverencode:': tolf, |
|
184 | 184 | b'cleverdecode:': tocrlf, |
|
185 | 185 | } |
|
186 | 186 | |
|
187 | 187 | |
|
188 | 188 | class eolfile(object): |
|
189 | 189 | def __init__(self, ui, root, data): |
|
190 | 190 | self._decode = { |
|
191 | 191 | b'LF': b'to-lf', |
|
192 | 192 | b'CRLF': b'to-crlf', |
|
193 | 193 | b'BIN': b'is-binary', |
|
194 | 194 | } |
|
195 | 195 | self._encode = { |
|
196 | 196 | b'LF': b'to-lf', |
|
197 | 197 | b'CRLF': b'to-crlf', |
|
198 | 198 | b'BIN': b'is-binary', |
|
199 | 199 | } |
|
200 | 200 | |
|
201 | 201 | self.cfg = config.config() |
|
202 | 202 | # Our files should not be touched. The pattern must be |
|
203 | 203 | # inserted first to override a '** = native' pattern.
|
204 | 204 | self.cfg.set(b'patterns', b'.hg*', b'BIN', b'eol') |
|
205 | 205 | # We can then parse the user's patterns. |
|
206 | 206 | self.cfg.parse(b'.hgeol', data) |
|
207 | 207 | |
|
208 | 208 | isrepolf = self.cfg.get(b'repository', b'native') != b'CRLF' |
|
209 | 209 | self._encode[b'NATIVE'] = isrepolf and b'to-lf' or b'to-crlf' |
|
210 | 210 | iswdlf = ui.config(b'eol', b'native') in (b'LF', b'\n') |
|
211 | 211 | self._decode[b'NATIVE'] = iswdlf and b'to-lf' or b'to-crlf' |
|
212 | 212 | |
|
213 | 213 | include = [] |
|
214 | 214 | exclude = [] |
|
215 | 215 | self.patterns = [] |
|
216 | 216 | for pattern, style in self.cfg.items(b'patterns'): |
|
217 | 217 | key = style.upper() |
|
218 | 218 | if key == b'BIN': |
|
219 | 219 | exclude.append(pattern) |
|
220 | 220 | else: |
|
221 | 221 | include.append(pattern) |
|
222 | 222 | m = match.match(root, b'', [pattern]) |
|
223 | 223 | self.patterns.append((pattern, key, m)) |
|
224 | 224 | # This will match the files for which we need to care |
|
225 | 225 | # about inconsistent newlines. |
|
226 | 226 | self.match = match.match(root, b'', [], include, exclude) |
|
227 | 227 | |
|
228 | 228 | def copytoui(self, ui): |
|
229 | 229 | newpatterns = {pattern for pattern, key, m in self.patterns} |
|
230 | 230 | for section in (b'decode', b'encode'): |
|
231 | 231 | for oldpattern, _filter in ui.configitems(section): |
|
232 | 232 | if oldpattern not in newpatterns: |
|
233 | 233 | if ui.configsource(section, oldpattern) == b'eol': |
|
234 | 234 | ui.setconfig(section, oldpattern, b'!', b'eol') |
|
235 | 235 | for pattern, key, m in self.patterns: |
|
236 | 236 | try: |
|
237 | 237 | ui.setconfig(b'decode', pattern, self._decode[key], b'eol') |
|
238 | 238 | ui.setconfig(b'encode', pattern, self._encode[key], b'eol') |
|
239 | 239 | except KeyError: |
|
240 | 240 | ui.warn( |
|
241 | 241 | _(b"ignoring unknown EOL style '%s' from %s\n") |
|
242 | 242 | % (key, self.cfg.source(b'patterns', pattern)) |
|
243 | 243 | ) |
|
244 | 244 | # eol.only-consistent can be specified in ~/.hgrc or .hgeol |
|
245 | 245 | for k, v in self.cfg.items(b'eol'): |
|
246 | 246 | ui.setconfig(b'eol', k, v, b'eol') |
|
247 | 247 | |
|
248 | 248 | def checkrev(self, repo, ctx, files): |
|
249 | 249 | failed = [] |
|
250 | 250 | for f in files or ctx.files(): |
|
251 | 251 | if f not in ctx: |
|
252 | 252 | continue |
|
253 | 253 | for pattern, key, m in self.patterns: |
|
254 | 254 | if not m(f): |
|
255 | 255 | continue |
|
256 | 256 | target = self._encode[key] |
|
257 | 257 | data = ctx[f].data() |
|
258 | 258 | if ( |
|
259 | 259 | target == b"to-lf" |
|
260 | 260 | and b"\r\n" in data |
|
261 | 261 | or target == b"to-crlf" |
|
262 | 262 | and singlelf.search(data) |
|
263 | 263 | ): |
|
264 | 264 | failed.append((f, target, bytes(ctx))) |
|
265 | 265 | break |
|
266 | 266 | return failed |
|
267 | 267 | |
|
268 | 268 | |
|
269 | 269 | def parseeol(ui, repo, nodes): |
|
270 | 270 | try: |
|
271 | 271 | for node in nodes: |
|
272 | 272 | try: |
|
273 | 273 | if node is None: |
|
274 | 274 | # Cannot use workingctx.data() since it would load |
|
275 | 275 | # and cache the filters before we configure them. |
|
276 | 276 | data = repo.wvfs(b'.hgeol').read() |
|
277 | 277 | else: |
|
278 | 278 | data = repo[node][b'.hgeol'].data() |
|
279 | 279 | return eolfile(ui, repo.root, data) |
|
280 | 280 | except (IOError, LookupError): |
|
281 | 281 | pass |
|
282 | 282 | except errormod.ConfigError as inst: |
|
283 | 283 | ui.warn( |
|
284 | 284 | _( |
|
285 | 285 | b"warning: ignoring .hgeol file due to parse error " |
|
286 | 286 | b"at %s: %s\n" |
|
287 | 287 | ) |
|
288 | 288 | % (inst.location, inst.message) |
|
289 | 289 | ) |
|
290 | 290 | return None |
|
291 | 291 | |
|
292 | 292 | |
|
293 | 293 | def ensureenabled(ui): |
|
294 | 294 | """make sure the extension is enabled when used as hook |
|
295 | 295 | |
|
296 | 296 | When eol is used through hooks, the extension is never formally loaded and |
|
297 | 297 | enabled. This has some side effect, for example the config declaration is |
|
298 | 298 | never loaded. This function ensure the extension is enabled when running |
|
299 | 299 | hooks. |
|
300 | 300 | """ |
|
301 | 301 | if b'eol' in ui._knownconfig: |
|
302 | 302 | return |
|
303 | 303 | ui.setconfig(b'extensions', b'eol', b'', source=b'internal') |
|
304 | 304 | extensions.loadall(ui, [b'eol']) |
|
305 | 305 | |
|
306 | 306 | |
|
307 | 307 | def _checkhook(ui, repo, node, headsonly): |
|
308 | 308 | # Get revisions to check and touched files at the same time |
|
309 | 309 | ensureenabled(ui) |
|
310 | 310 | files = set() |
|
311 | 311 | revs = set() |
|
312 | 312 | for rev in pycompat.xrange(repo[node].rev(), len(repo)): |
|
313 | 313 | revs.add(rev) |
|
314 | 314 | if headsonly: |
|
315 | 315 | ctx = repo[rev] |
|
316 | 316 | files.update(ctx.files()) |
|
317 | 317 | for pctx in ctx.parents(): |
|
318 | 318 | revs.discard(pctx.rev()) |
|
319 | 319 | failed = [] |
|
320 | 320 | for rev in revs: |
|
321 | 321 | ctx = repo[rev] |
|
322 | 322 | eol = parseeol(ui, repo, [ctx.node()]) |
|
323 | 323 | if eol: |
|
324 | 324 | failed.extend(eol.checkrev(repo, ctx, files)) |
|
325 | 325 | |
|
326 | 326 | if failed: |
|
327 | 327 | eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'} |
|
328 | 328 | msgs = [] |
|
329 | 329 | for f, target, node in sorted(failed): |
|
330 | 330 | msgs.append( |
|
331 | 331 | _(b" %s in %s should not have %s line endings") |
|
332 | 332 | % (f, node, eols[target]) |
|
333 | 333 | ) |
|
334 | 334 | raise errormod.Abort( |
|
335 | 335 | _(b"end-of-line check failed:\n") + b"\n".join(msgs) |
|
336 | 336 | ) |
|
337 | 337 | |
|
338 | 338 | |
|
339 | 339 | def checkallhook(ui, repo, node, hooktype, **kwargs): |
|
340 | 340 | """verify that files have expected EOLs""" |
|
341 | 341 | _checkhook(ui, repo, node, False) |
|
342 | 342 | |
|
343 | 343 | |
|
344 | 344 | def checkheadshook(ui, repo, node, hooktype, **kwargs): |
|
345 | 345 | """verify that files have expected EOLs""" |
|
346 | 346 | _checkhook(ui, repo, node, True) |
|
347 | 347 | |
|
348 | 348 | |
|
349 | 349 | # "checkheadshook" used to be called "hook" |
|
350 | 350 | hook = checkheadshook |
|
351 | 351 | |
|
352 | 352 | |
|
353 | 353 | def preupdate(ui, repo, hooktype, parent1, parent2): |
|
354 | 354 | p1node = scmutil.resolvehexnodeidprefix(repo, parent1) |
|
355 | 355 | repo.loadeol([p1node]) |
|
356 | 356 | return False |
|
357 | 357 | |
|
358 | 358 | |
|
359 | 359 | def uisetup(ui): |
|
360 | 360 | ui.setconfig(b'hooks', b'preupdate.eol', preupdate, b'eol') |
|
361 | 361 | |
|
362 | 362 | |
|
363 | 363 | def extsetup(ui): |
|
364 | 364 | try: |
|
365 | 365 | extensions.find(b'win32text') |
|
366 | 366 | ui.warn( |
|
367 | 367 | _( |
|
368 | 368 | b"the eol extension is incompatible with the " |
|
369 | 369 | b"win32text extension\n" |
|
370 | 370 | ) |
|
371 | 371 | ) |
|
372 | 372 | except KeyError: |
|
373 | 373 | pass |
|
374 | 374 | |
|
375 | 375 | |
|
376 | 376 | def reposetup(ui, repo): |
|
377 | 377 | uisetup(repo.ui) |
|
378 | 378 | |
|
379 | 379 | if not repo.local(): |
|
380 | 380 | return |
|
381 |     | for name, fn in

    | 381 | for name, fn in filters.items():
|
382 | 382 | repo.adddatafilter(name, fn) |
|
383 | 383 | |
|
384 | 384 | ui.setconfig(b'patch', b'eol', b'auto', b'eol') |
|
385 | 385 | |
|
386 | 386 | class eolrepo(repo.__class__): |
|
387 | 387 | def loadeol(self, nodes): |
|
388 | 388 | eol = parseeol(self.ui, self, nodes) |
|
389 | 389 | if eol is None: |
|
390 | 390 | return None |
|
391 | 391 | eol.copytoui(self.ui) |
|
392 | 392 | return eol.match |
|
393 | 393 | |
|
394 | 394 | def _hgcleardirstate(self): |
|
395 | 395 | self._eolmatch = self.loadeol([None]) |
|
396 | 396 | if not self._eolmatch: |
|
397 | 397 | self._eolmatch = util.never |
|
398 | 398 | return |
|
399 | 399 | |
|
400 | 400 | oldeol = None |
|
401 | 401 | try: |
|
402 | 402 | cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache")) |
|
403 | 403 | except OSError: |
|
404 | 404 | cachemtime = 0 |
|
405 | 405 | else: |
|
406 | 406 | olddata = self.vfs.read(b"eol.cache") |
|
407 | 407 | if olddata: |
|
408 | 408 | oldeol = eolfile(self.ui, self.root, olddata) |
|
409 | 409 | |
|
410 | 410 | try: |
|
411 | 411 | eolmtime = os.path.getmtime(self.wjoin(b".hgeol")) |
|
412 | 412 | except OSError: |
|
413 | 413 | eolmtime = 0 |
|
414 | 414 | |
|
415 | 415 | if eolmtime >= cachemtime and eolmtime > 0: |
|
416 | 416 | self.ui.debug(b"eol: detected change in .hgeol\n") |
|
417 | 417 | |
|
418 | 418 | hgeoldata = self.wvfs.read(b'.hgeol') |
|
419 | 419 | neweol = eolfile(self.ui, self.root, hgeoldata) |
|
420 | 420 | |
|
421 | 421 | wlock = None |
|
422 | 422 | try: |
|
423 | 423 | wlock = self.wlock() |
|
424 | 424 | for f in self.dirstate: |
|
425 | 425 | if not self.dirstate.get_entry(f).maybe_clean: |
|
426 | 426 | continue |
|
427 | 427 | if oldeol is not None: |
|
428 | 428 | if not oldeol.match(f) and not neweol.match(f): |
|
429 | 429 | continue |
|
430 | 430 | oldkey = None |
|
431 | 431 | for pattern, key, m in oldeol.patterns: |
|
432 | 432 | if m(f): |
|
433 | 433 | oldkey = key |
|
434 | 434 | break |
|
435 | 435 | newkey = None |
|
436 | 436 | for pattern, key, m in neweol.patterns: |
|
437 | 437 | if m(f): |
|
438 | 438 | newkey = key |
|
439 | 439 | break |
|
440 | 440 | if oldkey == newkey: |
|
441 | 441 | continue |
|
442 | 442 | # all normal files need to be looked at again since |
|
443 | 443 | # the new .hgeol file specify a different filter |
|
444 | 444 | self.dirstate.set_possibly_dirty(f) |
|
445 | 445 | # Write the cache to update mtime and cache .hgeol |
|
446 | 446 | with self.vfs(b"eol.cache", b"w") as f: |
|
447 | 447 | f.write(hgeoldata) |
|
448 | 448 | except errormod.LockUnavailable: |
|
449 | 449 | # If we cannot lock the repository and clear the |
|
450 | 450 | # dirstate, then a commit might not see all files |
|
451 | 451 | # as modified. But if we cannot lock the |
|
452 | 452 | # repository, then we can also not make a commit, |
|
453 | 453 | # so ignore the error. |
|
454 | 454 | pass |
|
455 | 455 | finally: |
|
456 | 456 | if wlock is not None: |
|
457 | 457 | wlock.release() |
|
458 | 458 | |
|
459 | 459 | def commitctx(self, ctx, error=False, origctx=None): |
|
460 | 460 | for f in sorted(ctx.added() + ctx.modified()): |
|
461 | 461 | if not self._eolmatch(f): |
|
462 | 462 | continue |
|
463 | 463 | fctx = ctx[f] |
|
464 | 464 | if fctx is None: |
|
465 | 465 | continue |
|
466 | 466 | data = fctx.data() |
|
467 | 467 | if stringutil.binary(data): |
|
468 | 468 | # We should not abort here, since the user should |
|
469 | 469 | # be able to say "** = native" to automatically |
|
470 | 470 | # have all non-binary files taken care of. |
|
471 | 471 | continue |
|
472 | 472 | if inconsistenteol(data): |
|
473 | 473 | raise errormod.Abort( |
|
474 | 474 | _(b"inconsistent newline style in %s\n") % f |
|
475 | 475 | ) |
|
476 | 476 | return super(eolrepo, self).commitctx(ctx, error, origctx) |
|
477 | 477 | |
|
478 | 478 | repo.__class__ = eolrepo |
|
479 | 479 | repo._hgcleardirstate() |
@@ -1,858 +1,858 b'' | |||
|
1 | 1 | # Copyright 2016-present Facebook. All Rights Reserved. |
|
2 | 2 | # |
|
3 | 3 | # context: context needed to annotate a file |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | |
|
9 | 9 | import collections |
|
10 | 10 | import contextlib |
|
11 | 11 | import os |
|
12 | 12 | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | 14 | from mercurial.pycompat import ( |
|
15 | 15 | getattr, |
|
16 | 16 | open, |
|
17 | 17 | setattr, |
|
18 | 18 | ) |
|
19 | 19 | from mercurial.node import ( |
|
20 | 20 | bin, |
|
21 | 21 | hex, |
|
22 | 22 | short, |
|
23 | 23 | ) |
|
24 | 24 | from mercurial import ( |
|
25 | 25 | error, |
|
26 | 26 | linelog as linelogmod, |
|
27 | 27 | lock as lockmod, |
|
28 | 28 | mdiff, |
|
29 | 29 | pycompat, |
|
30 | 30 | scmutil, |
|
31 | 31 | util, |
|
32 | 32 | ) |
|
33 | 33 | from mercurial.utils import ( |
|
34 | 34 | hashutil, |
|
35 | 35 | stringutil, |
|
36 | 36 | ) |
|
37 | 37 | |
|
38 | 38 | from . import ( |
|
39 | 39 | error as faerror, |
|
40 | 40 | revmap as revmapmod, |
|
41 | 41 | ) |
|
42 | 42 | |
|
43 | 43 | # given path, get filelog, cached |
|
44 | 44 | @util.lrucachefunc |
|
45 | 45 | def _getflog(repo, path): |
|
46 | 46 | return repo.file(path) |
|
47 | 47 | |
|
48 | 48 | |
|
49 | 49 | # extracted from mercurial.context.basefilectx.annotate |
|
50 | 50 | def _parents(f, follow=True): |
|
51 | 51 | # Cut _descendantrev here to mitigate the penalty of lazy linkrev |
|
52 | 52 | # adjustment. Otherwise, p._adjustlinkrev() would walk changelog |
|
53 | 53 | # from the topmost introrev (= srcrev) down to p.linkrev() if it |
|
54 | 54 | # isn't an ancestor of the srcrev. |
|
55 | 55 | f._changeid |
|
56 | 56 | pl = f.parents() |
|
57 | 57 | |
|
58 | 58 | # Don't return renamed parents if we aren't following. |
|
59 | 59 | if not follow: |
|
60 | 60 | pl = [p for p in pl if p.path() == f.path()] |
|
61 | 61 | |
|
62 | 62 | # renamed filectx won't have a filelog yet, so set it |
|
63 | 63 | # from the cache to save time |
|
64 | 64 | for p in pl: |
|
65 | 65 | if not '_filelog' in p.__dict__: |
|
66 | 66 | p._filelog = _getflog(f._repo, p.path()) |
|
67 | 67 | |
|
68 | 68 | return pl |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | # extracted from mercurial.context.basefilectx.annotate. slightly modified |
|
72 | 72 | # so it takes a fctx instead of a pair of text and fctx. |
|
73 | 73 | def _decorate(fctx): |
|
74 | 74 | text = fctx.data() |
|
75 | 75 | linecount = text.count(b'\n') |
|
76 | 76 | if text and not text.endswith(b'\n'): |
|
77 | 77 | linecount += 1 |
|
78 | 78 | return ([(fctx, i) for i in pycompat.xrange(linecount)], text) |
|
79 | 79 | |
|
80 | 80 | |
|
81 | 81 | # extracted from mercurial.context.basefilectx.annotate. slightly modified |
|
82 | 82 | # so it takes an extra "blocks" parameter calculated elsewhere, instead of |
|
83 | 83 | # calculating diff here. |
|
84 | 84 | def _pair(parent, child, blocks): |
|
85 | 85 | for (a1, a2, b1, b2), t in blocks: |
|
86 | 86 | # Changed blocks ('!') or blocks made only of blank lines ('~') |
|
87 | 87 | # belong to the child. |
|
88 | 88 | if t == b'=': |
|
89 | 89 | child[0][b1:b2] = parent[0][a1:a2] |
|
90 | 90 | return child |
|
91 | 91 | |
|
92 | 92 | |
|
93 | 93 | # like scmutil.revsingle, but with lru cache, so their states (like manifests) |
|
94 | 94 | # could be reused |
|
95 | 95 | _revsingle = util.lrucachefunc(scmutil.revsingle) |
|
96 | 96 | |
|
97 | 97 | |
|
98 | 98 | def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None): |
|
99 | 99 | """(repo, str, str) -> fctx |
|
100 | 100 | |
|
101 | 101 | get the filectx object from repo, rev, path, in an efficient way. |
|
102 | 102 | |
|
103 | 103 | if resolverev is True, "rev" is a revision specified by the revset |
|
104 | 104 | language, otherwise "rev" is a nodeid, or a revision number that can |
|
105 | 105 | be consumed by repo.__getitem__. |
|
106 | 106 | |
|
107 | 107 | if adjustctx is not None, the returned fctx will point to a changeset |
|
108 | 108 | that introduces the change (last modified the file). if adjustctx |
|
109 | 109 | is 'linkrev', trust the linkrev and do not adjust it. this is noticeably |
|
110 | 110 | faster for big repos but is incorrect for some cases. |
|
111 | 111 | """ |
|
112 | 112 | if resolverev and not isinstance(rev, int) and rev is not None: |
|
113 | 113 | ctx = _revsingle(repo, rev) |
|
114 | 114 | else: |
|
115 | 115 | ctx = repo[rev] |
|
116 | 116 | |
|
117 | 117 | # If we don't need to adjust the linkrev, create the filectx using the |
|
118 | 118 | # changectx instead of using ctx[path]. This means it already has the |
|
119 | 119 | # changectx information, so blame -u will be able to look directly at the |
|
120 | 120 | # commitctx object instead of having to resolve it by going through the |
|
121 | 121 | # manifest. In a lazy-manifest world this can prevent us from downloading a |
|
122 | 122 | # lot of data. |
|
123 | 123 | if adjustctx is None: |
|
124 | 124 | # ctx.rev() is None means it's the working copy, which is a special |
|
125 | 125 | # case. |
|
126 | 126 | if ctx.rev() is None: |
|
127 | 127 | fctx = ctx[path] |
|
128 | 128 | else: |
|
129 | 129 | fctx = repo.filectx(path, changeid=ctx.rev()) |
|
130 | 130 | else: |
|
131 | 131 | fctx = ctx[path] |
|
132 | 132 | if adjustctx == b'linkrev': |
|
133 | 133 | introrev = fctx.linkrev() |
|
134 | 134 | else: |
|
135 | 135 | introrev = fctx.introrev() |
|
136 | 136 | if introrev != ctx.rev(): |
|
137 | 137 | fctx._changeid = introrev |
|
138 | 138 | fctx._changectx = repo[introrev] |
|
139 | 139 | return fctx |
|
140 | 140 | |
|
141 | 141 | |
|
142 | 142 | # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock |
|
143 | 143 | def encodedir(path): |
|
144 | 144 | return ( |
|
145 | 145 | path.replace(b'.hg/', b'.hg.hg/') |
|
146 | 146 | .replace(b'.l/', b'.l.hg/') |
|
147 | 147 | .replace(b'.m/', b'.m.hg/') |
|
148 | 148 | .replace(b'.lock/', b'.lock.hg/') |
|
149 | 149 | ) |
|
150 | 150 | |
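An illustrative sketch of the escaping above, which keeps fastannotate's own file suffixes from colliding with directory names in tracked paths:

    # directory components ending in a reserved suffix get '.hg' appended
    assert encodedir(b'a.l/b') == b'a.l.hg/b'
    assert encodedir(b'a.m/b.lock/c') == b'a.m.hg/b.lock.hg/c'
    assert encodedir(b'.hg/x') == b'.hg.hg/x'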
|
151 | 151 | |
|
152 | 152 | def hashdiffopts(diffopts): |
|
153 | 153 | diffoptstr = stringutil.pprint( |
|
154 | 154 | sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults) |
|
155 | 155 | ) |
|
156 | 156 | return hex(hashutil.sha1(diffoptstr).digest())[:6] |
|
157 | 157 | |
|
158 | 158 | |
|
159 | 159 | _defaultdiffopthash = hashdiffopts(mdiff.defaultopts) |
|
160 | 160 | |
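hashdiffopts condenses a diffopts instance into a six-character token used in cache directory names; a sketch of the invariants:

    # illustrative: the token is stable and six hex digits long
    assert len(_defaultdiffopthash) == 6
    assert hashdiffopts(mdiff.defaultopts) == _defaultdiffopthash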
|
161 | 161 | |
|
162 | 162 | class annotateopts(object): |
|
163 | 163 | """like mercurial.mdiff.diffopts, but is for annotate |
|
164 | 164 | |
|
165 | 165 | followrename: follow renames, like "hg annotate -f" |
|
166 | 166 | followmerge: follow p2 of a merge changeset, otherwise p2 is ignored |
|
167 | 167 | """ |
|
168 | 168 | |
|
169 | 169 | defaults = { |
|
170 | 170 | b'diffopts': None, |
|
171 | 171 | b'followrename': True, |
|
172 | 172 | b'followmerge': True, |
|
173 | 173 | } |
|
174 | 174 | |
|
175 | 175 | def __init__(self, **opts): |
|
176 | 176 | opts = pycompat.byteskwargs(opts) |
|
177 |     | for k, v in

    | 177 | for k, v in self.defaults.items():
|
178 | 178 | setattr(self, k, opts.get(k, v)) |
|
179 | 179 | |
|
180 | 180 | @util.propertycache |
|
181 | 181 | def shortstr(self): |
|
182 | 182 | """represent opts in a short string, suitable for a directory name""" |
|
183 | 183 | result = b'' |
|
184 | 184 | if not self.followrename: |
|
185 | 185 | result += b'r0' |
|
186 | 186 | if not self.followmerge: |
|
187 | 187 | result += b'm0' |
|
188 | 188 | if self.diffopts is not None: |
|
189 | 189 | assert isinstance(self.diffopts, mdiff.diffopts) |
|
190 | 190 | diffopthash = hashdiffopts(self.diffopts) |
|
191 | 191 | if diffopthash != _defaultdiffopthash: |
|
192 | 192 | result += b'i' + diffopthash |
|
193 | 193 | return result or b'default' |
|
194 | 194 | |
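A sketch of the tokens shortstr yields for a few option combinations (the values follow directly from the code above):

    # illustrative directory-name components
    assert annotateopts().shortstr == b'default'
    assert annotateopts(followrename=False).shortstr == b'r0'
    assert annotateopts(followrename=False, followmerge=False).shortstr == b'r0m0'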
|
195 | 195 | |
|
196 | 196 | defaultopts = annotateopts() |
|
197 | 197 | |
|
198 | 198 | |
|
199 | 199 | class _annotatecontext(object): |
|
200 | 200 | """do not use this class directly as it does not use lock to protect |
|
201 | 201 | writes. use "with annotatecontext(...)" instead. |
|
202 | 202 | """ |
|
203 | 203 | |
|
204 | 204 | def __init__(self, repo, path, linelogpath, revmappath, opts): |
|
205 | 205 | self.repo = repo |
|
206 | 206 | self.ui = repo.ui |
|
207 | 207 | self.path = path |
|
208 | 208 | self.opts = opts |
|
209 | 209 | self.linelogpath = linelogpath |
|
210 | 210 | self.revmappath = revmappath |
|
211 | 211 | self._linelog = None |
|
212 | 212 | self._revmap = None |
|
213 | 213 | self._node2path = {} # {str: str} |
|
214 | 214 | |
|
215 | 215 | @property |
|
216 | 216 | def linelog(self): |
|
217 | 217 | if self._linelog is None: |
|
218 | 218 | if os.path.exists(self.linelogpath): |
|
219 | 219 | with open(self.linelogpath, b'rb') as f: |
|
220 | 220 | try: |
|
221 | 221 | self._linelog = linelogmod.linelog.fromdata(f.read()) |
|
222 | 222 | except linelogmod.LineLogError: |
|
223 | 223 | self._linelog = linelogmod.linelog() |
|
224 | 224 | else: |
|
225 | 225 | self._linelog = linelogmod.linelog() |
|
226 | 226 | return self._linelog |
|
227 | 227 | |
|
228 | 228 | @property |
|
229 | 229 | def revmap(self): |
|
230 | 230 | if self._revmap is None: |
|
231 | 231 | self._revmap = revmapmod.revmap(self.revmappath) |
|
232 | 232 | return self._revmap |
|
233 | 233 | |
|
234 | 234 | def close(self): |
|
235 | 235 | if self._revmap is not None: |
|
236 | 236 | self._revmap.flush() |
|
237 | 237 | self._revmap = None |
|
238 | 238 | if self._linelog is not None: |
|
239 | 239 | with open(self.linelogpath, b'wb') as f: |
|
240 | 240 | f.write(self._linelog.encode()) |
|
241 | 241 | self._linelog = None |
|
242 | 242 | |
|
243 | 243 | __del__ = close |
|
244 | 244 | |
|
245 | 245 | def rebuild(self): |
|
246 | 246 | """delete linelog and revmap, useful for rebuilding""" |
|
247 | 247 | self.close() |
|
248 | 248 | self._node2path.clear() |
|
249 | 249 | _unlinkpaths([self.revmappath, self.linelogpath]) |
|
250 | 250 | |
|
251 | 251 | @property |
|
252 | 252 | def lastnode(self): |
|
253 | 253 | """return last node in revmap, or None if revmap is empty""" |
|
254 | 254 | if self._revmap is None: |
|
255 | 255 | # fast path, read revmap without loading its full content |
|
256 | 256 | return revmapmod.getlastnode(self.revmappath) |
|
257 | 257 | else: |
|
258 | 258 | return self._revmap.rev2hsh(self._revmap.maxrev) |
|
259 | 259 | |
|
260 | 260 | def isuptodate(self, master, strict=True): |
|
261 | 261 | """return True if the revmap / linelog is up-to-date, or the file |
|
262 | 262 | does not exist in the master revision. False otherwise. |
|
263 | 263 | |
|
264 | 264 | it tries to be fast and could return false negatives, because of the |
|
265 | 265 | use of linkrev instead of introrev. |
|
266 | 266 | |
|
267 | 267 | useful for both server and client to decide whether to update |
|
268 | 268 | fastannotate cache or not. |
|
269 | 269 | |
|
270 | 270 | if strict is True, even if fctx exists in the revmap, but is not the |
|
271 | 271 | last node, isuptodate will return False. it's good for performance - no |
|
272 | 272 | expensive check was done. |
|
273 | 273 | |
|
274 | 274 | if strict is False and fctx exists in the revmap, this function may
|
275 | 275 | return True. this is useful for the client to skip downloading the |
|
276 | 276 | cache if the client's master is behind the server's. |
|
277 | 277 | """ |
|
278 | 278 | lastnode = self.lastnode |
|
279 | 279 | try: |
|
280 | 280 | f = self._resolvefctx(master, resolverev=True) |
|
281 | 281 | # choose linkrev instead of introrev as the check is meant to be |
|
282 | 282 | # *fast*. |
|
283 | 283 | linknode = self.repo.changelog.node(f.linkrev()) |
|
284 | 284 | if not strict and lastnode and linknode != lastnode: |
|
285 | 285 | # check if f.node() is in the revmap. note: this loads the |
|
286 | 286 | # revmap and can be slow. |
|
287 | 287 | return self.revmap.hsh2rev(linknode) is not None |
|
288 | 288 | # avoid resolving old manifest, or slow adjustlinkrev to be fast, |
|
289 | 289 | # false negatives are acceptable in this case. |
|
290 | 290 | return linknode == lastnode |
|
291 | 291 | except LookupError: |
|
292 | 292 | # master does not have the file, or the revmap is ahead |
|
293 | 293 | return True |
|
294 | 294 | |
|
295 | 295 | def annotate(self, rev, master=None, showpath=False, showlines=False): |
|
296 | 296 | """incrementally update the cache so it includes revisions in the main |
|
297 | 297 | branch till 'master'. and run annotate on 'rev', which may or may not be |
|
298 | 298 | included in the main branch. |
|
299 | 299 | |
|
300 | 300 | if master is None, do not update linelog. |
|
301 | 301 | |
|
302 | 302 | the first value returned is the annotate result, it is [(node, linenum)] |
|
303 | 303 | by default. [(node, linenum, path)] if showpath is True. |
|
304 | 304 | |
|
305 | 305 | if showlines is True, a second value will be returned, it is a list of |
|
306 | 306 | corresponding line contents. |
|
307 | 307 | """ |
|
308 | 308 | |
|
309 | 309 | # the fast path test requires commit hash, convert rev number to hash, |
|
310 | 310 | # so it may hit the fast path. note: in the "fctx" mode, the "annotate" |
|
311 | 311 | # command could give us a revision number even if the user passes a |
|
312 | 312 | # commit hash. |
|
313 | 313 | if isinstance(rev, int): |
|
314 | 314 | rev = hex(self.repo.changelog.node(rev)) |
|
315 | 315 | |
|
316 | 316 | # fast path: if rev is in the main branch already |
|
317 | 317 | directly, revfctx = self.canannotatedirectly(rev) |
|
318 | 318 | if directly: |
|
319 | 319 | if self.ui.debugflag: |
|
320 | 320 | self.ui.debug( |
|
321 | 321 | b'fastannotate: %s: using fast path ' |
|
322 | 322 | b'(resolved fctx: %s)\n' |
|
323 | 323 | % ( |
|
324 | 324 | self.path, |
|
325 | 325 | stringutil.pprint(util.safehasattr(revfctx, b'node')), |
|
326 | 326 | ) |
|
327 | 327 | ) |
|
328 | 328 | return self.annotatedirectly(revfctx, showpath, showlines) |
|
329 | 329 | |
|
330 | 330 | # resolve master |
|
331 | 331 | masterfctx = None |
|
332 | 332 | if master: |
|
333 | 333 | try: |
|
334 | 334 | masterfctx = self._resolvefctx( |
|
335 | 335 | master, resolverev=True, adjustctx=True |
|
336 | 336 | ) |
|
337 | 337 | except LookupError: # master does not have the file |
|
338 | 338 | pass |
|
339 | 339 | else: |
|
340 | 340 | if masterfctx in self.revmap: # no need to update linelog |
|
341 | 341 | masterfctx = None |
|
342 | 342 | |
|
343 | 343 | # ... - @ <- rev (can be an arbitrary changeset, |
|
344 | 344 | # / not necessarily a descendant |
|
345 | 345 | # master -> o of master) |
|
346 | 346 | # | |
|
347 | 347 | # a merge -> o 'o': new changesets in the main branch |
|
348 | 348 | # |\ '#': revisions in the main branch that |
|
349 | 349 | # o * exist in linelog / revmap |
|
350 | 350 | # | . '*': changesets in side branches, or |
|
351 | 351 | # last master -> # . descendants of master |
|
352 | 352 | # | . |
|
353 | 353 | # # * joint: '#', and is a parent of a '*' |
|
354 | 354 | # |/ |
|
355 | 355 | # a joint -> # ^^^^ --- side branches |
|
356 | 356 | # | |
|
357 | 357 | # ^ --- main branch (in linelog) |
|
358 | 358 | |
|
359 | 359 | # these DFSes are similar to the traditional annotate algorithm. |
|
360 | 360 | # we cannot really reuse the code for perf reason. |
|
361 | 361 | |
|
362 | 362 | # 1st DFS calculates merges, joint points, and needed. |
|
363 | 363 | # "needed" is a simple reference counting dict to free items in |
|
364 | 364 | # "hist", reducing its memory usage otherwise could be huge. |
|
365 | 365 | initvisit = [revfctx] |
|
366 | 366 | if masterfctx: |
|
367 | 367 | if masterfctx.rev() is None: |
|
368 | 368 | raise error.Abort( |
|
369 | 369 | _(b'cannot update linelog to wdir()'), |
|
370 | 370 | hint=_(b'set fastannotate.mainbranch'), |
|
371 | 371 | ) |
|
372 | 372 | initvisit.append(masterfctx) |
|
373 | 373 | visit = initvisit[:] |
|
374 | 374 | pcache = {} |
|
375 | 375 | needed = {revfctx: 1} |
|
376 | 376 | hist = {} # {fctx: ([(llrev or fctx, linenum)], text)} |
|
377 | 377 | while visit: |
|
378 | 378 | f = visit.pop() |
|
379 | 379 | if f in pcache or f in hist: |
|
380 | 380 | continue |
|
381 | 381 | if f in self.revmap: # in the old main branch, it's a joint |
|
382 | 382 | llrev = self.revmap.hsh2rev(f.node()) |
|
383 | 383 | self.linelog.annotate(llrev) |
|
384 | 384 | result = self.linelog.annotateresult |
|
385 | 385 | hist[f] = (result, f.data()) |
|
386 | 386 | continue |
|
387 | 387 | pl = self._parentfunc(f) |
|
388 | 388 | pcache[f] = pl |
|
389 | 389 | for p in pl: |
|
390 | 390 | needed[p] = needed.get(p, 0) + 1 |
|
391 | 391 | if p not in pcache: |
|
392 | 392 | visit.append(p) |
|
393 | 393 | |
|
394 | 394 | # 2nd (simple) DFS calculates new changesets in the main branch |
|
395 | 395 | # ('o' nodes in the above graph), so we know when to update linelog.
|
396 | 396 | newmainbranch = set() |
|
397 | 397 | f = masterfctx |
|
398 | 398 | while f and f not in self.revmap: |
|
399 | 399 | newmainbranch.add(f) |
|
400 | 400 | pl = pcache[f] |
|
401 | 401 | if pl: |
|
402 | 402 | f = pl[0] |
|
403 | 403 | else: |
|
404 | 404 | f = None |
|
405 | 405 | break |
|
406 | 406 | |
|
407 | 407 | # f, if present, is the position where the last build stopped, and
|
408 | 408 | # should be the "master" last time. check to see if we can continue |
|
409 | 409 | # building the linelog incrementally. (we cannot if diverged) |
|
410 | 410 | if masterfctx is not None: |
|
411 | 411 | self._checklastmasterhead(f) |
|
412 | 412 | |
|
413 | 413 | if self.ui.debugflag: |
|
414 | 414 | if newmainbranch: |
|
415 | 415 | self.ui.debug( |
|
416 | 416 | b'fastannotate: %s: %d new changesets in the main' |
|
417 | 417 | b' branch\n' % (self.path, len(newmainbranch)) |
|
418 | 418 | ) |
|
419 | 419 | elif not hist: # no joints, no updates |
|
420 | 420 | self.ui.debug( |
|
421 | 421 | b'fastannotate: %s: linelog cannot help in ' |
|
422 | 422 | b'annotating this revision\n' % self.path |
|
423 | 423 | ) |
|
424 | 424 | |
|
425 | 425 | # prepare annotateresult so we can update linelog incrementally |
|
426 | 426 | self.linelog.annotate(self.linelog.maxrev) |
|
427 | 427 | |
|
428 | 428 | # 3rd DFS does the actual annotate |
|
429 | 429 | visit = initvisit[:] |
|
430 | 430 | progress = self.ui.makeprogress( |
|
431 | 431 | b'building cache', total=len(newmainbranch) |
|
432 | 432 | ) |
|
433 | 433 | while visit: |
|
434 | 434 | f = visit[-1] |
|
435 | 435 | if f in hist: |
|
436 | 436 | visit.pop() |
|
437 | 437 | continue |
|
438 | 438 | |
|
439 | 439 | ready = True |
|
440 | 440 | pl = pcache[f] |
|
441 | 441 | for p in pl: |
|
442 | 442 | if p not in hist: |
|
443 | 443 | ready = False |
|
444 | 444 | visit.append(p) |
|
445 | 445 | if not ready: |
|
446 | 446 | continue |
|
447 | 447 | |
|
448 | 448 | visit.pop() |
|
449 | 449 | blocks = None # mdiff blocks, used for appending linelog |
|
450 | 450 | ismainbranch = f in newmainbranch |
|
451 | 451 | # curr is the same as the traditional annotate algorithm, |
|
452 | 452 | # if we only care about linear history (do not follow merge), |
|
453 | 453 | # then curr is not actually used. |
|
454 | 454 | assert f not in hist |
|
455 | 455 | curr = _decorate(f) |
|
456 | 456 | for i, p in enumerate(pl): |
|
457 | 457 | bs = list(self._diffblocks(hist[p][1], curr[1])) |
|
458 | 458 | if i == 0 and ismainbranch: |
|
459 | 459 | blocks = bs |
|
460 | 460 | curr = _pair(hist[p], curr, bs) |
|
461 | 461 | if needed[p] == 1: |
|
462 | 462 | del hist[p] |
|
463 | 463 | del needed[p] |
|
464 | 464 | else: |
|
465 | 465 | needed[p] -= 1 |
|
466 | 466 | |
|
467 | 467 | hist[f] = curr |
|
468 | 468 | del pcache[f] |
|
469 | 469 | |
|
470 | 470 | if ismainbranch: # need to write to linelog |
|
471 | 471 | progress.increment() |
|
472 | 472 | bannotated = None |
|
473 | 473 | if len(pl) == 2 and self.opts.followmerge: # merge |
|
474 | 474 | bannotated = curr[0] |
|
475 | 475 | if blocks is None: # no parents, add an empty one |
|
476 | 476 | blocks = list(self._diffblocks(b'', curr[1])) |
|
477 | 477 | self._appendrev(f, blocks, bannotated) |
|
478 | 478 | elif showpath: # not append linelog, but we need to record path |
|
479 | 479 | self._node2path[f.node()] = f.path() |
|
480 | 480 | |
|
481 | 481 | progress.complete() |
|
482 | 482 | |
|
483 | 483 | result = [ |
|
484 | 484 | ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l) |
|
485 | 485 | for fr, l in hist[revfctx][0] |
|
486 | 486 | ] # [(node, linenumber)] |
|
487 | 487 | return self._refineannotateresult(result, revfctx, showpath, showlines) |
|
488 | 488 | |
|
489 | 489 | def canannotatedirectly(self, rev): |
|
490 | 490 | """(str) -> bool, fctx or node. |
|
491 | 491 | return (True, f) if we can annotate without updating the linelog, pass |
|
492 | 492 | f to annotatedirectly. |
|
493 | 493 | return (False, f) if we need extra calculation. f is the fctx resolved |
|
494 | 494 | from rev. |
|
495 | 495 | """ |
|
496 | 496 | result = True |
|
497 | 497 | f = None |
|
498 | 498 | if not isinstance(rev, int) and rev is not None: |
|
499 | 499 | hsh = {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev) |
|
500 | 500 | if hsh is not None and (hsh, self.path) in self.revmap: |
|
501 | 501 | f = hsh |
|
502 | 502 | if f is None: |
|
503 | 503 | adjustctx = b'linkrev' if self._perfhack else True |
|
504 | 504 | f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True) |
|
505 | 505 | result = f in self.revmap |
|
506 | 506 | if not result and self._perfhack: |
|
507 | 507 | # redo the resolution without perfhack - as we are going to |
|
508 | 508 | # do write operations, we need a correct fctx. |
|
509 | 509 | f = self._resolvefctx(rev, adjustctx=True, resolverev=True) |
|
510 | 510 | return result, f |
|
511 | 511 | |
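
The dict dispatch in canannotatedirectly, {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev), packs three cases into one expression. Unrolled, it is roughly the following (bin is from mercurial.node; tonodeid is a hypothetical name)::

    from mercurial.node import bin

    def tonodeid(rev):
        # 20 bytes: already a binary node id (bytes() is a no-op copy)
        if len(rev) == 20:
            return bytes(rev)
        # 40 bytes: the hex form, decode it to binary
        if len(rev) == 40:
            return bin(rev)
        # any other length cannot be a node id
        return None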
|
512 | 512 | def annotatealllines(self, rev, showpath=False, showlines=False): |
|
513 | 513 | """(rev : str) -> [(node : str, linenum : int, path : str)] |
|
514 | 514 | |
|
515 | 515 | the result has the same format as annotate, but includes all (including
|
516 | 516 | deleted) lines up to rev. call this after calling annotate(rev, ...) for |
|
517 | 517 | better performance and accuracy. |
|
518 | 518 | """ |
|
519 | 519 | revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True) |
|
520 | 520 | |
|
521 | 521 | # find a chain from rev to anything in the mainbranch |
|
522 | 522 | if revfctx not in self.revmap: |
|
523 | 523 | chain = [revfctx] |
|
524 | 524 | a = b'' |
|
525 | 525 | while True: |
|
526 | 526 | f = chain[-1] |
|
527 | 527 | pl = self._parentfunc(f) |
|
528 | 528 | if not pl: |
|
529 | 529 | break |
|
530 | 530 | if pl[0] in self.revmap: |
|
531 | 531 | a = pl[0].data() |
|
532 | 532 | break |
|
533 | 533 | chain.append(pl[0]) |
|
534 | 534 | |
|
535 | 535 | # both self.linelog and self.revmap are backed by the filesystem. now
|
536 | 536 | # we want to modify them but do not want to write changes back to |
|
537 | 537 | # files. so we create in-memory objects and copy them. it's like |
|
538 | 538 | # a "fork". |
|
539 | 539 | linelog = linelogmod.linelog() |
|
540 | 540 | linelog.copyfrom(self.linelog) |
|
541 | 541 | linelog.annotate(linelog.maxrev) |
|
542 | 542 | revmap = revmapmod.revmap() |
|
543 | 543 | revmap.copyfrom(self.revmap) |
|
544 | 544 | |
|
545 | 545 | for f in reversed(chain): |
|
546 | 546 | b = f.data() |
|
547 | 547 | blocks = list(self._diffblocks(a, b)) |
|
548 | 548 | self._doappendrev(linelog, revmap, f, blocks) |
|
549 | 549 | a = b |
|
550 | 550 | else: |
|
551 | 551 | # fastpath: use existing linelog, revmap as we don't write to them |
|
552 | 552 | linelog = self.linelog |
|
553 | 553 | revmap = self.revmap |
|
554 | 554 | |
|
555 | 555 | lines = linelog.getalllines() |
|
556 | 556 | hsh = revfctx.node() |
|
557 | 557 | llrev = revmap.hsh2rev(hsh) |
|
558 | 558 | result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev] |
|
559 | 559 | # cannot use _refineannotateresult since we need custom logic for |
|
560 | 560 | # resolving line contents |
|
561 | 561 | if showpath: |
|
562 | 562 | result = self._addpathtoresult(result, revmap) |
|
563 | 563 | if showlines: |
|
564 | 564 | linecontents = self._resolvelines(result, revmap, linelog) |
|
565 | 565 | result = (result, linecontents) |
|
566 | 566 | return result |
|
567 | 567 | |
|
568 | 568 | def _resolvelines(self, annotateresult, revmap, linelog): |
|
569 | 569 | """(annotateresult) -> [line]. designed for annotatealllines. |
|
570 | 570 | this is probably the most inefficient code in the whole fastannotate |
|
571 | 571 | directory. but we have made a decision that the linelog does not |
|
572 | 572 | store line contents. so getting them requires random accesses to |
|
573 | 573 | the revlog data; since there can be many, this can be very slow.
|
574 | 574 | """ |
|
575 | 575 | # [llrev] |
|
576 | 576 | revs = [revmap.hsh2rev(l[0]) for l in annotateresult] |
|
577 | 577 | result = [None] * len(annotateresult) |
|
578 | 578 | # {(rev, linenum): [lineindex]} |
|
579 | 579 | key2idxs = collections.defaultdict(list) |
|
580 | 580 | for i in pycompat.xrange(len(result)): |
|
581 | 581 | key2idxs[(revs[i], annotateresult[i][1])].append(i) |
|
582 | 582 | while key2idxs: |
|
583 | 583 | # find an unresolved line and its linelog rev to annotate |
|
584 | 584 | hsh = None |
|
585 | 585 | try: |
|
586 | for (rev, _linenum), idxs in pycompat.iteritems(key2idxs): | 

586 | for (rev, _linenum), idxs in key2idxs.items(): | 
|
587 | 587 | if revmap.rev2flag(rev) & revmapmod.sidebranchflag: |
|
588 | 588 | continue |
|
589 | 589 | hsh = annotateresult[idxs[0]][0] |
|
590 | 590 | break |
|
591 | 591 | except StopIteration: # no more unresolved lines |
|
592 | 592 | return result |
|
593 | 593 | if hsh is None: |
|
594 | 594 | # the remaining key2idxs are not in main branch, resolving them |
|
595 | 595 | # using the hard way... |
|
596 | 596 | revlines = {} |
|
597 | for (rev, linenum), idxs in pycompat.iteritems(key2idxs): | 

597 | for (rev, linenum), idxs in key2idxs.items(): | 
|
598 | 598 | if rev not in revlines: |
|
599 | 599 | hsh = annotateresult[idxs[0]][0] |
|
600 | 600 | if self.ui.debugflag: |
|
601 | 601 | self.ui.debug( |
|
602 | 602 | b'fastannotate: reading %s line #%d ' |
|
603 | 603 | b'to resolve lines %r\n' |
|
604 | 604 | % (short(hsh), linenum, idxs) |
|
605 | 605 | ) |
|
606 | 606 | fctx = self._resolvefctx(hsh, revmap.rev2path(rev)) |
|
607 | 607 | lines = mdiff.splitnewlines(fctx.data()) |
|
608 | 608 | revlines[rev] = lines |
|
609 | 609 | for idx in idxs: |
|
610 | 610 | result[idx] = revlines[rev][linenum] |
|
611 | 611 | assert all(x is not None for x in result) |
|
612 | 612 | return result |
|
613 | 613 | |
|
614 | 614 | # run the annotate and the lines should match to the file content |
|
615 | 615 | self.ui.debug( |
|
616 | 616 | b'fastannotate: annotate %s to resolve lines\n' % short(hsh) |
|
617 | 617 | ) |
|
618 | 618 | linelog.annotate(rev) |
|
619 | 619 | fctx = self._resolvefctx(hsh, revmap.rev2path(rev)) |
|
620 | 620 | annotated = linelog.annotateresult |
|
621 | 621 | lines = mdiff.splitnewlines(fctx.data()) |
|
622 | 622 | if len(lines) != len(annotated): |
|
623 | 623 | raise faerror.CorruptedFileError(b'unexpected annotated lines') |
|
624 | 624 | # resolve lines from the annotate result |
|
625 | 625 | for i, line in enumerate(lines): |
|
626 | 626 | k = annotated[i] |
|
627 | 627 | if k in key2idxs: |
|
628 | 628 | for idx in key2idxs[k]: |
|
629 | 629 | result[idx] = line |
|
630 | 630 | del key2idxs[k] |
|
631 | 631 | return result |
|
632 | 632 | |
|
633 | 633 | def annotatedirectly(self, f, showpath, showlines): |
|
634 | 634 | """like annotate, but when we know that f is in linelog. |
|
635 | 635 | f can be either a 20-char str (node) or a fctx. this is for perf - in |
|
636 | 636 | the best case, the user provides a node and we don't need to read the |
|
637 | 637 | filelog or construct any filecontext. |
|
638 | 638 | """ |
|
639 | 639 | if isinstance(f, bytes): |
|
640 | 640 | hsh = f |
|
641 | 641 | else: |
|
642 | 642 | hsh = f.node() |
|
643 | 643 | llrev = self.revmap.hsh2rev(hsh) |
|
644 | 644 | if not llrev: |
|
645 | 645 | raise faerror.CorruptedFileError(b'%s is not in revmap' % hex(hsh)) |
|
646 | 646 | if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0: |
|
647 | 647 | raise faerror.CorruptedFileError( |
|
648 | 648 | b'%s is not in revmap mainbranch' % hex(hsh) |
|
649 | 649 | ) |
|
650 | 650 | self.linelog.annotate(llrev) |
|
651 | 651 | result = [ |
|
652 | 652 | (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult |
|
653 | 653 | ] |
|
654 | 654 | return self._refineannotateresult(result, f, showpath, showlines) |
|
655 | 655 | |
|
656 | 656 | def _refineannotateresult(self, result, f, showpath, showlines): |
|
657 | 657 | """add the missing path or line contents; they can be expensive.
|
658 | 658 | f could be either node or fctx. |
|
659 | 659 | """ |
|
660 | 660 | if showpath: |
|
661 | 661 | result = self._addpathtoresult(result) |
|
662 | 662 | if showlines: |
|
663 | 663 | if isinstance(f, bytes): # f: node or fctx |
|
664 | 664 | llrev = self.revmap.hsh2rev(f) |
|
665 | 665 | fctx = self._resolvefctx(f, self.revmap.rev2path(llrev)) |
|
666 | 666 | else: |
|
667 | 667 | fctx = f |
|
668 | 668 | lines = mdiff.splitnewlines(fctx.data()) |
|
669 | 669 | if len(lines) != len(result): # linelog is probably corrupted |
|
670 | 670 | raise faerror.CorruptedFileError() |
|
671 | 671 | result = (result, lines) |
|
672 | 672 | return result |
|
673 | 673 | |
|
674 | 674 | def _appendrev(self, fctx, blocks, bannotated=None): |
|
675 | 675 | self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated) |
|
676 | 676 | |
|
677 | 677 | def _diffblocks(self, a, b): |
|
678 | 678 | return mdiff.allblocks(a, b, self.opts.diffopts) |
|
679 | 679 | |
|
680 | 680 | @staticmethod |
|
681 | 681 | def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None): |
|
682 | 682 | """append a revision to linelog and revmap""" |
|
683 | 683 | |
|
684 | 684 | def getllrev(f): |
|
685 | 685 | """(fctx) -> int""" |
|
686 | 686 | # f should not be a linelog revision |
|
687 | 687 | if isinstance(f, int): |
|
688 | 688 | raise error.ProgrammingError(b'f should not be an int') |
|
689 | 689 | # f is a fctx, allocate linelog rev on demand |
|
690 | 690 | hsh = f.node() |
|
691 | 691 | rev = revmap.hsh2rev(hsh) |
|
692 | 692 | if rev is None: |
|
693 | 693 | rev = revmap.append(hsh, sidebranch=True, path=f.path()) |
|
694 | 694 | return rev |
|
695 | 695 | |
|
696 | 696 | # append sidebranch revisions to revmap |
|
697 | 697 | siderevs = [] |
|
698 | 698 | siderevmap = {} # node: int |
|
699 | 699 | if bannotated is not None: |
|
700 | 700 | for (a1, a2, b1, b2), op in blocks: |
|
701 | 701 | if op != b'=': |
|
702 | 702 | # f could be either a linelog rev, or a fctx.
|
703 | 703 | siderevs += [ |
|
704 | 704 | f |
|
705 | 705 | for f, l in bannotated[b1:b2] |
|
706 | 706 | if not isinstance(f, int) |
|
707 | 707 | ] |
|
708 | 708 | siderevs = set(siderevs) |
|
709 | 709 | if fctx in siderevs: # mainnode must be appended separately
|
710 | 710 | siderevs.remove(fctx) |
|
711 | 711 | for f in siderevs: |
|
712 | 712 | siderevmap[f] = getllrev(f) |
|
713 | 713 | |
|
714 | 714 | # the changeset in the main branch, could be a merge |
|
715 | 715 | llrev = revmap.append(fctx.node(), path=fctx.path()) |
|
716 | 716 | siderevmap[fctx] = llrev |
|
717 | 717 | |
|
718 | 718 | for (a1, a2, b1, b2), op in reversed(blocks): |
|
719 | 719 | if op == b'=': |
|
720 | 720 | continue |
|
721 | 721 | if bannotated is None: |
|
722 | 722 | linelog.replacelines(llrev, a1, a2, b1, b2) |
|
723 | 723 | else: |
|
724 | 724 | blines = [ |
|
725 | 725 | ((r if isinstance(r, int) else siderevmap[r]), l) |
|
726 | 726 | for r, l in bannotated[b1:b2] |
|
727 | 727 | ] |
|
728 | 728 | linelog.replacelines_vec(llrev, a1, a2, blines) |
|
729 | 729 | |
|
730 | 730 | def _addpathtoresult(self, annotateresult, revmap=None): |
|
731 | 731 | """(revmap, [(node, linenum)]) -> [(node, linenum, path)]""" |
|
732 | 732 | if revmap is None: |
|
733 | 733 | revmap = self.revmap |
|
734 | 734 | |
|
735 | 735 | def _getpath(nodeid): |
|
736 | 736 | path = self._node2path.get(nodeid) |
|
737 | 737 | if path is None: |
|
738 | 738 | path = revmap.rev2path(revmap.hsh2rev(nodeid)) |
|
739 | 739 | self._node2path[nodeid] = path |
|
740 | 740 | return path |
|
741 | 741 | |
|
742 | 742 | return [(n, l, _getpath(n)) for n, l in annotateresult] |
|
743 | 743 | |
|
744 | 744 | def _checklastmasterhead(self, fctx): |
|
745 | 745 | """check if fctx was the master's head last time; raise if not"""
|
746 | 746 | if fctx is None: |
|
747 | 747 | llrev = 0 |
|
748 | 748 | else: |
|
749 | 749 | llrev = self.revmap.hsh2rev(fctx.node()) |
|
750 | 750 | if not llrev: |
|
751 | 751 | raise faerror.CannotReuseError() |
|
752 | 752 | if self.linelog.maxrev != llrev: |
|
753 | 753 | raise faerror.CannotReuseError() |
|
754 | 754 | |
|
755 | 755 | @util.propertycache |
|
756 | 756 | def _parentfunc(self): |
|
757 | 757 | """-> (fctx) -> [fctx]""" |
|
758 | 758 | followrename = self.opts.followrename |
|
759 | 759 | followmerge = self.opts.followmerge |
|
760 | 760 | |
|
761 | 761 | def parents(f): |
|
762 | 762 | pl = _parents(f, follow=followrename) |
|
763 | 763 | if not followmerge: |
|
764 | 764 | pl = pl[:1] |
|
765 | 765 | return pl |
|
766 | 766 | |
|
767 | 767 | return parents |
|
768 | 768 | |
|
769 | 769 | @util.propertycache |
|
770 | 770 | def _perfhack(self): |
|
771 | 771 | return self.ui.configbool(b'fastannotate', b'perfhack') |
|
772 | 772 | |
|
773 | 773 | def _resolvefctx(self, rev, path=None, **kwds): |
|
774 | 774 | return resolvefctx(self.repo, rev, (path or self.path), **kwds) |
|
775 | 775 | |
|
776 | 776 | |
|
777 | 777 | def _unlinkpaths(paths): |
|
778 | 778 | """silent, best-effort unlink""" |
|
779 | 779 | for path in paths: |
|
780 | 780 | try: |
|
781 | 781 | util.unlink(path) |
|
782 | 782 | except OSError: |
|
783 | 783 | pass |
|
784 | 784 | |
|
785 | 785 | |
|
786 | 786 | class pathhelper(object): |
|
787 | 787 | """helper for getting paths for lockfile, linelog and revmap""" |
|
788 | 788 | |
|
789 | 789 | def __init__(self, repo, path, opts=defaultopts): |
|
790 | 790 | # different options use different directories |
|
791 | 791 | self._vfspath = os.path.join( |
|
792 | 792 | b'fastannotate', opts.shortstr, encodedir(path) |
|
793 | 793 | ) |
|
794 | 794 | self._repo = repo |
|
795 | 795 | |
|
796 | 796 | @property |
|
797 | 797 | def dirname(self): |
|
798 | 798 | return os.path.dirname(self._repo.vfs.join(self._vfspath)) |
|
799 | 799 | |
|
800 | 800 | @property |
|
801 | 801 | def linelogpath(self): |
|
802 | 802 | return self._repo.vfs.join(self._vfspath + b'.l') |
|
803 | 803 | |
|
804 | 804 | def lock(self): |
|
805 | 805 | return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock') |
|
806 | 806 | |
|
807 | 807 | @property |
|
808 | 808 | def revmappath(self): |
|
809 | 809 | return self._repo.vfs.join(self._vfspath + b'.m') |
|
810 | 810 | |
|
811 | 811 | |
|
812 | 812 | @contextlib.contextmanager |
|
813 | 813 | def annotatecontext(repo, path, opts=defaultopts, rebuild=False): |
|
814 | 814 | """context needed to perform (fast) annotate on a file |
|
815 | 815 | |
|
816 | 816 | an annotatecontext of a single file consists of two structures: the |
|
817 | 817 | linelog and the revmap. this function takes care of locking. only 1 |
|
818 | 818 | process is allowed to write that file's linelog and revmap at a time. |
|
819 | 819 | |
|
820 | 820 | when something goes wrong, this function will assume the linelog and the |
|
821 | 821 | revmap are in a bad state, and remove them from disk. |
|
822 | 822 | |
|
823 | 823 | use this function in the following way: |
|
824 | 824 | |
|
825 | 825 | with annotatecontext(...) as actx: |
|
826 | 826 | actx. .... |
|
827 | 827 | """ |
|
828 | 828 | helper = pathhelper(repo, path, opts) |
|
829 | 829 | util.makedirs(helper.dirname) |
|
830 | 830 | revmappath = helper.revmappath |
|
831 | 831 | linelogpath = helper.linelogpath |
|
832 | 832 | actx = None |
|
833 | 833 | try: |
|
834 | 834 | with helper.lock(): |
|
835 | 835 | actx = _annotatecontext(repo, path, linelogpath, revmappath, opts) |
|
836 | 836 | if rebuild: |
|
837 | 837 | actx.rebuild() |
|
838 | 838 | yield actx |
|
839 | 839 | except Exception: |
|
840 | 840 | if actx is not None: |
|
841 | 841 | actx.rebuild() |
|
842 | 842 | repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path) |
|
843 | 843 | raise |
|
844 | 844 | finally: |
|
845 | 845 | if actx is not None: |
|
846 | 846 | actx.close() |
|
847 | 847 | |
|
848 | 848 | |
|
849 | 849 | def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False): |
|
850 | 850 | """like annotatecontext but gets the context from a fctx. convenient when
|
851 | 851 | used in fctx.annotate |
|
852 | 852 | """ |
|
853 | 853 | repo = fctx._repo |
|
854 | 854 | path = fctx._path |
|
855 | 855 | if repo.ui.configbool(b'fastannotate', b'forcefollow', True): |
|
856 | 856 | follow = True |
|
857 | 857 | aopts = annotateopts(diffopts=diffopts, followrename=follow) |
|
858 | 858 | return annotatecontext(repo, path, aopts, rebuild) |
@@ -1,260 +1,259 b'' | |||
|
1 | 1 | # Copyright 2016-present Facebook. All Rights Reserved. |
|
2 | 2 | # |
|
3 | 3 | # protocol: logic for a server providing fastannotate support |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import contextlib |
|
9 | 9 | import os |
|
10 | 10 | |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | from mercurial.pycompat import open |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | error, |
|
15 | 15 | extensions, |
|
16 | 16 | hg, |
|
17 | pycompat, | |
|
18 | 17 | util, |
|
19 | 18 | wireprotov1peer, |
|
20 | 19 | wireprotov1server, |
|
21 | 20 | ) |
|
22 | 21 | from mercurial.utils import ( |
|
23 | 22 | urlutil, |
|
24 | 23 | ) |
|
25 | 24 | from . import context |
|
26 | 25 | |
|
27 | 26 | # common |
|
28 | 27 | |
|
29 | 28 | |
|
30 | 29 | def _getmaster(ui): |
|
31 | 30 | """get the mainbranch, and enforce it is set""" |
|
32 | 31 | master = ui.config(b'fastannotate', b'mainbranch') |
|
33 | 32 | if not master: |
|
34 | 33 | raise error.Abort( |
|
35 | 34 | _( |
|
36 | 35 | b'fastannotate.mainbranch is required ' |
|
37 | 36 | b'for both the client and the server' |
|
38 | 37 | ) |
|
39 | 38 | ) |
|
40 | 39 | return master |
|
41 | 40 | |
|
42 | 41 | |
|
43 | 42 | # server-side |
|
44 | 43 | |
|
45 | 44 | |
|
46 | 45 | def _capabilities(orig, repo, proto): |
|
47 | 46 | result = orig(repo, proto) |
|
48 | 47 | result.append(b'getannotate') |
|
49 | 48 | return result |
|
50 | 49 | |
|
51 | 50 | |
|
52 | 51 | def _getannotate(repo, proto, path, lastnode): |
|
53 | 52 | # output: |
|
54 | 53 | # FILE := vfspath + '\0' + str(size) + '\0' + content |
|
55 | 54 | # OUTPUT := '' | FILE + OUTPUT |
|
56 | 55 | result = b'' |
|
57 | 56 | buildondemand = repo.ui.configbool( |
|
58 | 57 | b'fastannotate', b'serverbuildondemand', True |
|
59 | 58 | ) |
|
60 | 59 | with context.annotatecontext(repo, path) as actx: |
|
61 | 60 | if buildondemand: |
|
62 | 61 | # update before responding to the client |
|
63 | 62 | master = _getmaster(repo.ui) |
|
64 | 63 | try: |
|
65 | 64 | if not actx.isuptodate(master): |
|
66 | 65 | actx.annotate(master, master) |
|
67 | 66 | except Exception: |
|
68 | 67 | # non-fast-forward move or corrupted. rebuild automatically.
|
69 | 68 | actx.rebuild() |
|
70 | 69 | try: |
|
71 | 70 | actx.annotate(master, master) |
|
72 | 71 | except Exception: |
|
73 | 72 | actx.rebuild() # delete files |
|
74 | 73 | finally: |
|
75 | 74 | # although the "with" context will also do a close/flush, we |
|
76 | 75 | # need to do it early so we can send the correct respond to |
|
77 | 76 | # client. |
|
78 | 77 | actx.close() |
|
79 | 78 | # send back the full content of revmap and linelog, in the future we |
|
80 | 79 | # may want to do some rsync-like fancy updating. |
|
81 | 80 | # the lastnode check is not necessary if the client and the server |
|
82 | 81 | # agree where the main branch is. |
|
83 | 82 | if actx.lastnode != lastnode: |
|
84 | 83 | for p in [actx.revmappath, actx.linelogpath]: |
|
85 | 84 | if not os.path.exists(p): |
|
86 | 85 | continue |
|
87 | 86 | with open(p, b'rb') as f: |
|
88 | 87 | content = f.read() |
|
89 | 88 | vfsbaselen = len(repo.vfs.base + b'/') |
|
90 | 89 | relpath = p[vfsbaselen:] |
|
91 | 90 | result += b'%s\0%d\0%s' % (relpath, len(content), content) |
|
92 | 91 | return result |
|
93 | 92 | |
|
94 | 93 | |
|
95 | 94 | def _registerwireprotocommand(): |
|
96 | 95 | if b'getannotate' in wireprotov1server.commands: |
|
97 | 96 | return |
|
98 | 97 | wireprotov1server.wireprotocommand(b'getannotate', b'path lastnode')( |
|
99 | 98 | _getannotate |
|
100 | 99 | ) |
|
101 | 100 | |
|
102 | 101 | |
|
103 | 102 | def serveruisetup(ui): |
|
104 | 103 | _registerwireprotocommand() |
|
105 | 104 | extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities) |
|
106 | 105 | |
|
107 | 106 | |
|
108 | 107 | # client-side |
|
109 | 108 | |
|
110 | 109 | |
|
111 | 110 | def _parseresponse(payload): |
|
112 | 111 | result = {} |
|
113 | 112 | i = 0 |
|
114 | 113 | l = len(payload) - 1 |
|
115 | 114 | state = 0 # 0: vfspath, 1: size |
|
116 | 115 | vfspath = size = b'' |
|
117 | 116 | while i < l: |
|
118 | 117 | ch = payload[i : i + 1] |
|
119 | 118 | if ch == b'\0': |
|
120 | 119 | if state == 1: |
|
121 | 120 | result[vfspath] = payload[i + 1 : i + 1 + int(size)] |
|
122 | 121 | i += int(size) |
|
123 | 122 | state = 0 |
|
124 | 123 | vfspath = size = b'' |
|
125 | 124 | elif state == 0: |
|
126 | 125 | state = 1 |
|
127 | 126 | else: |
|
128 | 127 | if state == 1: |
|
129 | 128 | size += ch |
|
130 | 129 | elif state == 0: |
|
131 | 130 | vfspath += ch |
|
132 | 131 | i += 1 |
|
133 | 132 | return result |
|
134 | 133 | |
|
135 | 134 | |
|
136 | 135 | def peersetup(ui, peer): |
|
137 | 136 | class fastannotatepeer(peer.__class__): |
|
138 | 137 | @wireprotov1peer.batchable |
|
139 | 138 | def getannotate(self, path, lastnode=None): |
|
140 | 139 | if not self.capable(b'getannotate'): |
|
141 | 140 | ui.warn(_(b'remote peer cannot provide annotate cache\n')) |
|
142 | 141 | return None, None |
|
143 | 142 | else: |
|
144 | 143 | args = {b'path': path, b'lastnode': lastnode or b''} |
|
145 | 144 | return args, _parseresponse |
|
146 | 145 | |
|
147 | 146 | peer.__class__ = fastannotatepeer |
|
148 | 147 | |
|
149 | 148 | |
|
150 | 149 | @contextlib.contextmanager |
|
151 | 150 | def annotatepeer(repo): |
|
152 | 151 | ui = repo.ui |
|
153 | 152 | |
|
154 | 153 | remotedest = ui.config(b'fastannotate', b'remotepath', b'default') |
|
155 | 154 | r = urlutil.get_unique_pull_path(b'fastannotate', repo, ui, remotedest) |
|
156 | 155 | remotepath = r[0] |
|
157 | 156 | peer = hg.peer(ui, {}, remotepath) |
|
158 | 157 | |
|
159 | 158 | try: |
|
160 | 159 | yield peer |
|
161 | 160 | finally: |
|
162 | 161 | peer.close() |
|
163 | 162 | |
|
164 | 163 | |
|
165 | 164 | def clientfetch(repo, paths, lastnodemap=None, peer=None): |
|
166 | 165 | """download annotate cache from the server for paths""" |
|
167 | 166 | if not paths: |
|
168 | 167 | return |
|
169 | 168 | |
|
170 | 169 | if peer is None: |
|
171 | 170 | with annotatepeer(repo) as peer: |
|
172 | 171 | return clientfetch(repo, paths, lastnodemap, peer) |
|
173 | 172 | |
|
174 | 173 | if lastnodemap is None: |
|
175 | 174 | lastnodemap = {} |
|
176 | 175 | |
|
177 | 176 | ui = repo.ui |
|
178 | 177 | results = [] |
|
179 | 178 | with peer.commandexecutor() as batcher: |
|
180 | 179 | ui.debug(b'fastannotate: requesting %d files\n' % len(paths)) |
|
181 | 180 | for p in paths: |
|
182 | 181 | results.append( |
|
183 | 182 | batcher.callcommand( |
|
184 | 183 | b'getannotate', |
|
185 | 184 | {b'path': p, b'lastnode': lastnodemap.get(p)}, |
|
186 | 185 | ) |
|
187 | 186 | ) |
|
188 | 187 | |
|
189 | 188 | for result in results: |
|
190 | 189 | r = result.result() |
|
191 | 190 | # TODO: pconvert these paths on the server? |
|
192 |
r = {util.pconvert(p): v for p, v in |
|
|
191 | r = {util.pconvert(p): v for p, v in r.items()} | |
|
193 | 192 | for path in sorted(r): |
|
194 | 193 | # ignore malicious paths |
|
195 | 194 | if not path.startswith(b'fastannotate/') or b'/../' in ( |
|
196 | 195 | path + b'/' |
|
197 | 196 | ): |
|
198 | 197 | ui.debug( |
|
199 | 198 | b'fastannotate: ignored malicious path %s\n' % path |
|
200 | 199 | ) |
|
201 | 200 | continue |
|
202 | 201 | content = r[path] |
|
203 | 202 | if ui.debugflag: |
|
204 | 203 | ui.debug( |
|
205 | 204 | b'fastannotate: writing %d bytes to %s\n' |
|
206 | 205 | % (len(content), path) |
|
207 | 206 | ) |
|
208 | 207 | repo.vfs.makedirs(os.path.dirname(path)) |
|
209 | 208 | with repo.vfs(path, b'wb') as f: |
|
210 | 209 | f.write(content) |
|
211 | 210 | |
|
212 | 211 | |
|
213 | 212 | def _filterfetchpaths(repo, paths): |
|
214 | 213 | """return a subset of paths whose history is long and need to fetch linelog |
|
215 | 214 | from the server. works with remotefilelog and non-remotefilelog repos. |
|
216 | 215 | """ |
|
217 | 216 | threshold = repo.ui.configint(b'fastannotate', b'clientfetchthreshold', 10) |
|
218 | 217 | if threshold <= 0: |
|
219 | 218 | return paths |
|
220 | 219 | |
|
221 | 220 | result = [] |
|
222 | 221 | for path in paths: |
|
223 | 222 | try: |
|
224 | 223 | if len(repo.file(path)) >= threshold: |
|
225 | 224 | result.append(path) |
|
226 | 225 | except Exception: # file not found etc. |
|
227 | 226 | result.append(path) |
|
228 | 227 | |
|
229 | 228 | return result |
|
230 | 229 | |
|
231 | 230 | |
|
232 | 231 | def localreposetup(ui, repo): |
|
233 | 232 | class fastannotaterepo(repo.__class__): |
|
234 | 233 | def prefetchfastannotate(self, paths, peer=None): |
|
235 | 234 | master = _getmaster(self.ui) |
|
236 | 235 | needupdatepaths = [] |
|
237 | 236 | lastnodemap = {} |
|
238 | 237 | try: |
|
239 | 238 | for path in _filterfetchpaths(self, paths): |
|
240 | 239 | with context.annotatecontext(self, path) as actx: |
|
241 | 240 | if not actx.isuptodate(master, strict=False): |
|
242 | 241 | needupdatepaths.append(path) |
|
243 | 242 | lastnodemap[path] = actx.lastnode |
|
244 | 243 | if needupdatepaths: |
|
245 | 244 | clientfetch(self, needupdatepaths, lastnodemap, peer) |
|
246 | 245 | except Exception as ex: |
|
247 | 246 | # could be directory not writable or so, not fatal |
|
248 | 247 | self.ui.debug(b'fastannotate: prefetch failed: %r\n' % ex) |
|
249 | 248 | |
|
250 | 249 | repo.__class__ = fastannotaterepo |
|
251 | 250 | |
|
252 | 251 | |
|
253 | 252 | def clientreposetup(ui, repo): |
|
254 | 253 | _registerwireprotocommand() |
|
255 | 254 | if repo.local(): |
|
256 | 255 | localreposetup(ui, repo) |
|
257 | 256 | # TODO: this mutates global state, but only if at least one repo |
|
258 | 257 | # has the extension enabled. This is probably bad for hgweb. |
|
259 | 258 | if peersetup not in hg.wirepeersetupfuncs: |
|
260 | 259 | hg.wirepeersetupfuncs.append(peersetup) |
@@ -1,957 +1,955 b'' | |||
|
1 | 1 | # fix - rewrite file content in changesets and working copy |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2018 Google LLC. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """rewrite file content in changesets or working copy (EXPERIMENTAL) |
|
8 | 8 | |
|
9 | 9 | Provides a command that runs configured tools on the contents of modified files, |
|
10 | 10 | writing back any fixes to the working copy or replacing changesets. |
|
11 | 11 | |
|
12 | 12 | Here is an example configuration that causes :hg:`fix` to apply automatic |
|
13 | 13 | formatting fixes to modified lines in C++ code:: |
|
14 | 14 | |
|
15 | 15 | [fix] |
|
16 | 16 | clang-format:command=clang-format --assume-filename={rootpath} |
|
17 | 17 | clang-format:linerange=--lines={first}:{last} |
|
18 | 18 | clang-format:pattern=set:**.cpp or **.hpp |
|
19 | 19 | |
|
20 | 20 | The :command suboption forms the first part of the shell command that will be |
|
21 | 21 | used to fix a file. The content of the file is passed on standard input, and the |
|
22 | 22 | fixed file content is expected on standard output. Any output on standard error |
|
23 | 23 | will be displayed as a warning. If the exit status is not zero, the file will |
|
24 | 24 | not be affected. A placeholder warning is displayed if there is a non-zero exit |
|
25 | 25 | status but no standard error output. Some values may be substituted into the |
|
26 | 26 | command:: |
|
27 | 27 | |
|
28 | 28 | {rootpath} The path of the file being fixed, relative to the repo root |
|
29 | 29 | {basename} The name of the file being fixed, without the directory path |
|
30 | 30 | |
|
31 | 31 | If the :linerange suboption is set, the tool will only be run if there are |
|
32 | 32 | changed lines in a file. The value of this suboption is appended to the shell |
|
33 | 33 | command once for every range of changed lines in the file. Some values may be |
|
34 | 34 | substituted into the command:: |
|
35 | 35 | |
|
36 | 36 | {first} The 1-based line number of the first line in the modified range |
|
37 | 37 | {last} The 1-based line number of the last line in the modified range |
|
38 | 38 | |
|
39 | 39 | Deleted sections of a file will be ignored by :linerange, because there is no |
|
40 | 40 | corresponding line range in the version being fixed. |
|
41 | 41 | |
|
42 | 42 | By default, tools that set :linerange will only be executed if there is at least |
|
43 | 43 | one changed line range. This is meant to prevent accidents like running a code |
|
44 | 44 | formatter in such a way that it unexpectedly reformats the whole file. If such a |
|
45 | 45 | tool needs to operate on unchanged files, it should set the :skipclean suboption |
|
46 | 46 | to false. |
|
47 | 47 | |
|
48 | 48 | The :pattern suboption determines which files will be passed through each |
|
49 | 49 | configured tool. See :hg:`help patterns` for possible values. However, all |
|
50 | 50 | patterns are relative to the repo root, even if that text says they are relative |
|
51 | 51 | to the current working directory. If there are file arguments to :hg:`fix`, the |
|
52 | 52 | intersection of these patterns is used. |
|
53 | 53 | |
|
54 | 54 | There is also a configurable limit for the maximum size of file that will be |
|
55 | 55 | processed by :hg:`fix`:: |
|
56 | 56 | |
|
57 | 57 | [fix] |
|
58 | 58 | maxfilesize = 2MB |
|
59 | 59 | |
|
60 | 60 | Normally, execution of configured tools will continue after a failure (indicated |
|
61 | 61 | by a non-zero exit status). It can also be configured to abort after the first |
|
62 | 62 | such failure, so that no files will be affected if any tool fails. This abort |
|
63 | 63 | will also cause :hg:`fix` to exit with a non-zero status:: |
|
64 | 64 | |
|
65 | 65 | [fix] |
|
66 | 66 | failure = abort |
|
67 | 67 | |
|
68 | 68 | When multiple tools are configured to affect a file, they execute in an order |
|
69 | 69 | defined by the :priority suboption. The priority suboption has a default value |
|
70 | 70 | of zero for each tool. Tools are executed in order of descending priority. The |
|
71 | 71 | execution order of tools with equal priority is unspecified. For example, you |
|
72 | 72 | could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers |
|
73 | 73 | in a text file by ensuring that 'sort' runs before 'head':: |
|
74 | 74 | |
|
75 | 75 | [fix] |
|
76 | 76 | sort:command = sort -n |
|
77 | 77 | head:command = head -n 10 |
|
78 | 78 | sort:pattern = numbers.txt |
|
79 | 79 | head:pattern = numbers.txt |
|
80 | 80 | sort:priority = 2 |
|
81 | 81 | head:priority = 1 |
|
82 | 82 | |
|
83 | 83 | To account for changes made by each tool, the line numbers used for incremental |
|
84 | 84 | formatting are recomputed before executing the next tool. So, each tool may see |
|
85 | 85 | different values for the arguments added by the :linerange suboption. |
|
86 | 86 | |
|
87 | 87 | Each fixer tool is allowed to return some metadata in addition to the fixed file |
|
88 | 88 | content. The metadata must be placed before the file content on stdout, |
|
89 | 89 | separated from the file content by a zero byte. The metadata is parsed as a JSON |
|
90 | 90 | value (so, it should be UTF-8 encoded and contain no zero bytes). A fixer tool |
|
91 | 91 | is expected to produce this metadata encoding if and only if the :metadata |
|
92 | 92 | suboption is true:: |
|
93 | 93 | |
|
94 | 94 | [fix] |
|
95 | 95 | tool:command = tool --prepend-json-metadata |
|
96 | 96 | tool:metadata = true |
|
97 | 97 | |
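
For concreteness, a hypothetical fixer script honoring this contract — JSON metadata, then a single zero byte, then the fixed content, all on stdout; the tab-expansion "fix" is purely illustrative::

    #!/usr/bin/env python3
    import json
    import sys

    data = sys.stdin.buffer.read()
    fixed = data.replace(b'\t', b'    ')
    metadata = {'tabs_replaced': data.count(b'\t')}
    out = sys.stdout.buffer
    out.write(json.dumps(metadata).encode('utf-8'))  # UTF-8, no zero bytes
    out.write(b'\0')                                 # the separator byte
    out.write(fixed)                                 # the fixed file content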
|
98 | 98 | The metadata values are passed to hooks, which can be used to print summaries or |
|
99 | 99 | perform other post-fixing work. The supported hooks are:: |
|
100 | 100 | |
|
101 | 101 | "postfixfile" |
|
102 | 102 | Run once for each file in each revision where any fixer tools made changes |
|
103 | 103 | to the file content. Provides "$HG_REV" and "$HG_PATH" to identify the file, |
|
104 | 104 | and "$HG_METADATA" with a map of fixer names to metadata values from fixer |
|
105 | 105 | tools that affected the file. Fixer tools that didn't affect the file have a |
|
106 | 106 | value of None. Only fixer tools that executed are present in the metadata. |
|
107 | 107 | |
|
108 | 108 | "postfix" |
|
109 | 109 | Run once after all files and revisions have been handled. Provides |
|
110 | 110 | "$HG_REPLACEMENTS" with information about what revisions were created and |
|
111 | 111 | made obsolete. Provides a boolean "$HG_WDIRWRITTEN" to indicate whether any |
|
112 | 112 | files in the working copy were updated. Provides a list "$HG_METADATA" |
|
113 | 113 | mapping fixer tool names to lists of metadata values returned from |
|
114 | 114 | executions that modified a file. This aggregates the same metadata |
|
115 | 115 | previously passed to the "postfixfile" hook. |
|
116 | 116 | |
|
117 | 117 | Fixer tools are run in the repository's root directory. This allows them to read |
|
118 | 118 | configuration files from the working copy, or even write to the working copy. |
|
119 | 119 | The working copy is not updated to match the revision being fixed. In fact, |
|
120 | 120 | several revisions may be fixed in parallel. Writes to the working copy are not |
|
121 | 121 | amended into the revision being fixed; fixer tools should always write fixed |
|
122 | 122 | file content back to stdout as documented above. |
|
123 | 123 | """ |
|
124 | 124 | |
|
125 | 125 | |
|
126 | 126 | import collections |
|
127 | 127 | import itertools |
|
128 | 128 | import os |
|
129 | 129 | import re |
|
130 | 130 | import subprocess |
|
131 | 131 | |
|
132 | 132 | from mercurial.i18n import _ |
|
133 | 133 | from mercurial.node import ( |
|
134 | 134 | nullid, |
|
135 | 135 | nullrev, |
|
136 | 136 | wdirrev, |
|
137 | 137 | ) |
|
138 | 138 | |
|
139 | 139 | from mercurial.utils import procutil |
|
140 | 140 | |
|
141 | 141 | from mercurial import ( |
|
142 | 142 | cmdutil, |
|
143 | 143 | context, |
|
144 | 144 | copies, |
|
145 | 145 | error, |
|
146 | 146 | logcmdutil, |
|
147 | 147 | match as matchmod, |
|
148 | 148 | mdiff, |
|
149 | 149 | merge, |
|
150 | 150 | mergestate as mergestatemod, |
|
151 | 151 | pycompat, |
|
152 | 152 | registrar, |
|
153 | 153 | rewriteutil, |
|
154 | 154 | scmutil, |
|
155 | 155 | util, |
|
156 | 156 | worker, |
|
157 | 157 | ) |
|
158 | 158 | |
|
159 | 159 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
160 | 160 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
161 | 161 | # be specifying the version(s) of Mercurial they are tested with, or |
|
162 | 162 | # leave the attribute unspecified. |
|
163 | 163 | testedwith = b'ships-with-hg-core' |
|
164 | 164 | |
|
165 | 165 | cmdtable = {} |
|
166 | 166 | command = registrar.command(cmdtable) |
|
167 | 167 | |
|
168 | 168 | configtable = {} |
|
169 | 169 | configitem = registrar.configitem(configtable) |
|
170 | 170 | |
|
171 | 171 | # Register the suboptions allowed for each configured fixer, and default values. |
|
172 | 172 | FIXER_ATTRS = { |
|
173 | 173 | b'command': None, |
|
174 | 174 | b'linerange': None, |
|
175 | 175 | b'pattern': None, |
|
176 | 176 | b'priority': 0, |
|
177 | 177 | b'metadata': False, |
|
178 | 178 | b'skipclean': True, |
|
179 | 179 | b'enabled': True, |
|
180 | 180 | } |
|
181 | 181 | |
|
182 | 182 | for key, default in FIXER_ATTRS.items(): |
|
183 | 183 | configitem(b'fix', b'.*:%s$' % key, default=default, generic=True) |
|
184 | 184 | |
|
185 | 185 | # A good default size allows most source code files to be fixed, but avoids |
|
186 | 186 | # letting fixer tools choke on huge inputs, which could be surprising to the |
|
187 | 187 | # user. |
|
188 | 188 | configitem(b'fix', b'maxfilesize', default=b'2MB') |
|
189 | 189 | |
|
190 | 190 | # Allow fix commands to exit non-zero if an executed fixer tool exits non-zero. |
|
191 | 191 | # This helps users do shell scripts that stop when a fixer tool signals a |
|
192 | 192 | # problem. |
|
193 | 193 | configitem(b'fix', b'failure', default=b'continue') |
|
194 | 194 | |
|
195 | 195 | |
|
196 | 196 | def checktoolfailureaction(ui, message, hint=None): |
|
197 | 197 | """Abort with 'message' if fix.failure=abort""" |
|
198 | 198 | action = ui.config(b'fix', b'failure') |
|
199 | 199 | if action not in (b'continue', b'abort'): |
|
200 | 200 | raise error.Abort( |
|
201 | 201 | _(b'unknown fix.failure action: %s') % (action,), |
|
202 | 202 | hint=_(b'use "continue" or "abort"'), |
|
203 | 203 | ) |
|
204 | 204 | if action == b'abort': |
|
205 | 205 | raise error.Abort(message, hint=hint) |
|
206 | 206 | |
|
207 | 207 | |
|
208 | 208 | allopt = (b'', b'all', False, _(b'fix all non-public non-obsolete revisions')) |
|
209 | 209 | baseopt = ( |
|
210 | 210 | b'', |
|
211 | 211 | b'base', |
|
212 | 212 | [], |
|
213 | 213 | _( |
|
214 | 214 | b'revisions to diff against (overrides automatic ' |
|
215 | 215 | b'selection, and applies to every revision being ' |
|
216 | 216 | b'fixed)' |
|
217 | 217 | ), |
|
218 | 218 | _(b'REV'), |
|
219 | 219 | ) |
|
220 | 220 | revopt = (b'r', b'rev', [], _(b'revisions to fix (ADVANCED)'), _(b'REV')) |
|
221 | 221 | sourceopt = ( |
|
222 | 222 | b's', |
|
223 | 223 | b'source', |
|
224 | 224 | [], |
|
225 | 225 | _(b'fix the specified revisions and their descendants'), |
|
226 | 226 | _(b'REV'), |
|
227 | 227 | ) |
|
228 | 228 | wdiropt = (b'w', b'working-dir', False, _(b'fix the working directory')) |
|
229 | 229 | wholeopt = (b'', b'whole', False, _(b'always fix every line of a file')) |
|
230 | 230 | usage = _(b'[OPTION]... [FILE]...') |
|
231 | 231 | |
|
232 | 232 | |
|
233 | 233 | @command( |
|
234 | 234 | b'fix', |
|
235 | 235 | [allopt, baseopt, revopt, sourceopt, wdiropt, wholeopt], |
|
236 | 236 | usage, |
|
237 | 237 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
238 | 238 | ) |
|
239 | 239 | def fix(ui, repo, *pats, **opts): |
|
240 | 240 | """rewrite file content in changesets or working directory |
|
241 | 241 | |
|
242 | 242 | Runs any configured tools to fix the content of files. Only affects files |
|
243 | 243 | with changes, unless file arguments are provided. Only affects changed lines |
|
244 | 244 | of files, unless the --whole flag is used. Some tools may always affect the |
|
245 | 245 | whole file regardless of --whole. |
|
246 | 246 | |
|
247 | 247 | If --working-dir is used, files with uncommitted changes in the working copy |
|
248 | 248 | will be fixed. Note that no backups are made.
|
249 | 249 | |
|
250 | 250 | If revisions are specified with --source, those revisions and their |
|
251 | 251 | descendants will be checked, and they may be replaced with new revisions |
|
252 | 252 | that have fixed file content. By automatically including the descendants, |
|
253 | 253 | no merging, rebasing, or evolution will be required. If an ancestor of the |
|
254 | 254 | working copy is included, then the working copy itself will also be fixed, |
|
255 | 255 | and the working copy will be updated to the fixed parent. |
|
256 | 256 | |
|
257 | 257 | When determining what lines of each file to fix at each revision, the whole |
|
258 | 258 | set of revisions being fixed is considered, so that fixes to earlier |
|
259 | 259 | revisions are not forgotten in later ones. The --base flag can be used to |
|
260 | 260 | override this default behavior, though it is not usually desirable to do so. |
|
261 | 261 | """ |
|
262 | 262 | opts = pycompat.byteskwargs(opts) |
|
263 | 263 | cmdutil.check_at_most_one_arg(opts, b'all', b'source', b'rev') |
|
264 | 264 | cmdutil.check_incompatible_arguments( |
|
265 | 265 | opts, b'working_dir', [b'all', b'source'] |
|
266 | 266 | ) |
|
267 | 267 | |
|
268 | 268 | with repo.wlock(), repo.lock(), repo.transaction(b'fix'): |
|
269 | 269 | revstofix = getrevstofix(ui, repo, opts) |
|
270 | 270 | basectxs = getbasectxs(repo, opts, revstofix) |
|
271 | 271 | workqueue, numitems = getworkqueue( |
|
272 | 272 | ui, repo, pats, opts, revstofix, basectxs |
|
273 | 273 | ) |
|
274 | 274 | basepaths = getbasepaths(repo, opts, workqueue, basectxs) |
|
275 | 275 | fixers = getfixers(ui) |
|
276 | 276 | |
|
277 | 277 | # Rather than letting each worker independently fetch the files |
|
278 | 278 | # (which also would add complications for shared/keepalive |
|
279 | 279 | # connections), prefetch them all first. |
|
280 | 280 | _prefetchfiles(repo, workqueue, basepaths) |
|
281 | 281 | |
|
282 | 282 | # There are no data dependencies between the workers fixing each file |
|
283 | 283 | # revision, so we can use all available parallelism. |
|
284 | 284 | def getfixes(items): |
|
285 | 285 | for srcrev, path, dstrevs in items: |
|
286 | 286 | ctx = repo[srcrev] |
|
287 | 287 | olddata = ctx[path].data() |
|
288 | 288 | metadata, newdata = fixfile( |
|
289 | 289 | ui, |
|
290 | 290 | repo, |
|
291 | 291 | opts, |
|
292 | 292 | fixers, |
|
293 | 293 | ctx, |
|
294 | 294 | path, |
|
295 | 295 | basepaths, |
|
296 | 296 | basectxs[srcrev], |
|
297 | 297 | ) |
|
298 | 298 | # We ungroup the work items now, because the code that consumes |
|
299 | 299 | # these results has to handle each dstrev separately, and in |
|
300 | 300 | # topological order. Because these are handled in topological |
|
301 | 301 | # order, it's important that we pass around references to |
|
302 | 302 | # "newdata" instead of copying it. Otherwise, we would be |
|
303 | 303 | # keeping more copies of file content in memory at a time than |
|
304 | 304 | # if we hadn't bothered to group/deduplicate the work items. |
|
305 | 305 | data = newdata if newdata != olddata else None |
|
306 | 306 | for dstrev in dstrevs: |
|
307 | 307 | yield (dstrev, path, metadata, data) |
|
308 | 308 | |
|
309 | 309 | results = worker.worker( |
|
310 | 310 | ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False |
|
311 | 311 | ) |
|
312 | 312 | |
|
313 | 313 | # We have to hold on to the data for each successor revision in memory |
|
314 | 314 | # until all its parents are committed. We ensure this by committing and |
|
315 | 315 | # freeing memory for the revisions in some topological order. This |
|
316 | 316 | # leaves a little bit of memory efficiency on the table, but also makes |
|
317 | 317 | # the tests deterministic. It might also be considered a feature since |
|
318 | 318 | # it makes the results more easily reproducible. |
|
319 | 319 | filedata = collections.defaultdict(dict) |
|
320 | 320 | aggregatemetadata = collections.defaultdict(list) |
|
321 | 321 | replacements = {} |
|
322 | 322 | wdirwritten = False |
|
323 | 323 | commitorder = sorted(revstofix, reverse=True) |
|
324 | 324 | with ui.makeprogress( |
|
325 | 325 | topic=_(b'fixing'), unit=_(b'files'), total=sum(numitems.values()) |
|
326 | 326 | ) as progress: |
|
327 | 327 | for rev, path, filerevmetadata, newdata in results: |
|
328 | 328 | progress.increment(item=path) |
|
329 | 329 | for fixername, fixermetadata in filerevmetadata.items(): |
|
330 | 330 | aggregatemetadata[fixername].append(fixermetadata) |
|
331 | 331 | if newdata is not None: |
|
332 | 332 | filedata[rev][path] = newdata |
|
333 | 333 | hookargs = { |
|
334 | 334 | b'rev': rev, |
|
335 | 335 | b'path': path, |
|
336 | 336 | b'metadata': filerevmetadata, |
|
337 | 337 | } |
|
338 | 338 | repo.hook( |
|
339 | 339 | b'postfixfile', |
|
340 | 340 | throw=False, |
|
341 | 341 | **pycompat.strkwargs(hookargs) |
|
342 | 342 | ) |
|
343 | 343 | numitems[rev] -= 1 |
|
344 | 344 | # Apply the fixes for this and any other revisions that are |
|
345 | 345 | # ready and sitting at the front of the queue. Using a loop here |
|
346 | 346 | # prevents the queue from being blocked by the first revision to |
|
347 | 347 | # be ready out of order. |
|
348 | 348 | while commitorder and not numitems[commitorder[-1]]: |
|
349 | 349 | rev = commitorder.pop() |
|
350 | 350 | ctx = repo[rev] |
|
351 | 351 | if rev == wdirrev: |
|
352 | 352 | writeworkingdir(repo, ctx, filedata[rev], replacements) |
|
353 | 353 | wdirwritten = bool(filedata[rev]) |
|
354 | 354 | else: |
|
355 | 355 | replacerev(ui, repo, ctx, filedata[rev], replacements) |
|
356 | 356 | del filedata[rev] |
|
357 | 357 | |
|
358 | 358 | cleanup(repo, replacements, wdirwritten) |
|
359 | 359 | hookargs = { |
|
360 | 360 | b'replacements': replacements, |
|
361 | 361 | b'wdirwritten': wdirwritten, |
|
362 | 362 | b'metadata': aggregatemetadata, |
|
363 | 363 | } |
|
364 | 364 | repo.hook(b'postfix', throw=True, **pycompat.strkwargs(hookargs)) |
|
365 | 365 | |
|
366 | 366 | |
|
367 | 367 | def cleanup(repo, replacements, wdirwritten): |
|
368 | 368 | """Calls scmutil.cleanupnodes() with the given replacements. |
|
369 | 369 | |
|
370 | 370 | "replacements" is a dict from nodeid to nodeid, with one key and one value |
|
371 | 371 | for every revision that was affected by fixing. This is slightly different |
|
372 | 372 | from cleanupnodes(). |
|
373 | 373 | |
|
374 | 374 | "wdirwritten" is a bool which tells whether the working copy was affected by |
|
375 | 375 | fixing, since it has no entry in "replacements". |
|
376 | 376 | |
|
377 | 377 | Useful as a hook point for extending "hg fix" with output summarizing the |
|
378 | 378 | effects of the command, though we choose not to output anything here. |
|
379 | 379 | """ |
|
380 | replacements = { | |
|
381 | prec: [succ] for prec, succ in pycompat.iteritems(replacements) | |
|
382 | } | |
|
380 | replacements = {prec: [succ] for prec, succ in replacements.items()} | |
|
383 | 381 | scmutil.cleanupnodes(repo, replacements, b'fix', fixphase=True) |
|
384 | 382 | |
|
385 | 383 | |
|
386 | 384 | def getworkqueue(ui, repo, pats, opts, revstofix, basectxs): |
|
387 | 385 | """Constructs a list of files to fix and which revisions each fix applies to |
|
388 | 386 | |
|
389 | 387 | To avoid duplicating work, there is usually only one work item for each file |
|
390 | 388 | revision that might need to be fixed. There can be multiple work items per |
|
391 | 389 | file revision if the same file needs to be fixed in multiple changesets with |
|
392 | 390 | different baserevs. Each work item also contains a list of changesets where |
|
393 | 391 | the file's data should be replaced with the fixed data. The work items for |
|
394 | 392 | earlier changesets come earlier in the work queue, to improve pipelining by |
|
395 | 393 | allowing the first changeset to be replaced while fixes are still being |
|
396 | 394 | computed for later changesets. |
|
397 | 395 | |
|
398 | 396 | Also returned is a map from changesets to the count of work items that might |
|
399 | 397 | affect each changeset. This is used later to count when all of a changeset's |
|
400 | 398 | work items have been finished, without having to inspect the remaining work |
|
401 | 399 | queue in each worker subprocess. |
|
402 | 400 | |
|
403 | 401 | The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of |
|
404 | 402 | bar.txt should be read from revision 1, then fixed, and written back to |
|
405 | 403 | revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of |
|
406 | 404 | revisions is called the "dstrevs". In practice the srcrev is always one of |
|
407 | 405 | the dstrevs, and we make that choice when constructing the work item so that |
|
408 | 406 | the choice can't be made inconsistently later on. The dstrevs should all |
|
409 | 407 | have the same file revision for the given path, so the choice of srcrev is |
|
410 | 408 | arbitrary. The wdirrev can be a dstrev and a srcrev. |
|
411 | 409 | """ |
|
412 | 410 | dstrevmap = collections.defaultdict(list) |
|
413 | 411 | numitems = collections.defaultdict(int) |
|
414 | 412 | maxfilesize = ui.configbytes(b'fix', b'maxfilesize') |
|
415 | 413 | for rev in sorted(revstofix): |
|
416 | 414 | fixctx = repo[rev] |
|
417 | 415 | match = scmutil.match(fixctx, pats, opts) |
|
418 | 416 | for path in sorted( |
|
419 | 417 | pathstofix(ui, repo, pats, opts, match, basectxs[rev], fixctx) |
|
420 | 418 | ): |
|
421 | 419 | fctx = fixctx[path] |
|
422 | 420 | if fctx.islink(): |
|
423 | 421 | continue |
|
424 | 422 | if fctx.size() > maxfilesize: |
|
425 | 423 | ui.warn( |
|
426 | 424 | _(b'ignoring file larger than %s: %s\n') |
|
427 | 425 | % (util.bytecount(maxfilesize), path) |
|
428 | 426 | ) |
|
429 | 427 | continue |
|
430 | 428 | baserevs = tuple(ctx.rev() for ctx in basectxs[rev]) |
|
431 | 429 | dstrevmap[(fctx.filerev(), baserevs, path)].append(rev) |
|
432 | 430 | numitems[rev] += 1 |
|
433 | 431 | workqueue = [ |
|
434 | 432 | (min(dstrevs), path, dstrevs) |
|
435 | 433 | for (_filerev, _baserevs, path), dstrevs in dstrevmap.items() |
|
436 | 434 | ] |
|
437 | 435 | # Move work items for earlier changesets to the front of the queue, so we |
|
438 | 436 | # might be able to replace those changesets (in topological order) while |
|
439 | 437 | # we're still processing later work items. Note the min() in the previous |
|
440 | 438 | # expression, which means we don't need a custom comparator here. The path |
|
441 | 439 | # is also important in the sort order to make the output order stable. There |
|
442 | 440 | # are some situations where this doesn't help much, but some situations |
|
443 | 441 | # where it lets us buffer O(1) files instead of O(n) files. |
|
444 | 442 | workqueue.sort() |
|
445 | 443 | return workqueue, numitems |
|
446 | 444 | |
|
447 | 445 | |
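
A toy run of the grouping that builds the queue, with plain tuples standing in for file revisions and base contexts (all values hypothetical)::

    import collections

    # (dstrev, path, dedup key); revs 1 and 2 share a.txt's file revision
    seen = [
        (1, b'a.txt', (b'filerev-x', ())),
        (2, b'a.txt', (b'filerev-x', ())),
        (3, b'a.txt', (b'filerev-y', ())),
    ]
    dstrevmap = collections.defaultdict(list)
    for rev, path, key in seen:
        dstrevmap[key + (path,)].append(rev)
    workqueue = sorted(
        (min(dstrevs), path, dstrevs)
        for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
    )
    print(workqueue)  # [(1, b'a.txt', [1, 2]), (3, b'a.txt', [3])]

The shared file revision is fixed once, from srcrev 1, and the result is written back to revisions 1 and 2.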
|
448 | 446 | def getrevstofix(ui, repo, opts): |
|
449 | 447 | """Returns the set of revision numbers that should be fixed""" |
|
450 | 448 | if opts[b'all']: |
|
451 | 449 | revs = repo.revs(b'(not public() and not obsolete()) or wdir()') |
|
452 | 450 | elif opts[b'source']: |
|
453 | 451 | source_revs = logcmdutil.revrange(repo, opts[b'source']) |
|
454 | 452 | revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs)) |
|
455 | 453 | if wdirrev in source_revs: |
|
456 | 454 | # `wdir()::` is currently empty, so manually add wdir |
|
457 | 455 | revs.add(wdirrev) |
|
458 | 456 | if repo[b'.'].rev() in revs: |
|
459 | 457 | revs.add(wdirrev) |
|
460 | 458 | else: |
|
461 | 459 | revs = set(logcmdutil.revrange(repo, opts[b'rev'])) |
|
462 | 460 | if opts.get(b'working_dir'): |
|
463 | 461 | revs.add(wdirrev) |
|
464 | 462 | # Allow fixing only wdir() even if there's an unfinished operation |
|
465 | 463 | if not (len(revs) == 1 and wdirrev in revs): |
|
466 | 464 | cmdutil.checkunfinished(repo) |
|
467 | 465 | rewriteutil.precheck(repo, revs, b'fix') |
|
468 | 466 | if ( |
|
469 | 467 | wdirrev in revs |
|
470 | 468 | and mergestatemod.mergestate.read(repo).unresolvedcount() |
|
471 | 469 | ): |
|
472 | 470 | raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'") |
|
473 | 471 | if not revs: |
|
474 | 472 | raise error.Abort( |
|
475 | 473 | b'no changesets specified', hint=b'use --source or --working-dir' |
|
476 | 474 | ) |
|
477 | 475 | return revs |
|
478 | 476 | |
|
479 | 477 | |
|
480 | 478 | def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx): |
|
481 | 479 | """Returns the set of files that should be fixed in a context |
|
482 | 480 | |
|
483 | 481 | The result depends on the base contexts; we include any file that has |
|
484 | 482 | changed relative to any of the base contexts. Base contexts should be |
|
485 | 483 | ancestors of the context being fixed. |
|
486 | 484 | """ |
|
487 | 485 | files = set() |
|
488 | 486 | for basectx in basectxs: |
|
489 | 487 | stat = basectx.status( |
|
490 | 488 | fixctx, match=match, listclean=bool(pats), listunknown=bool(pats) |
|
491 | 489 | ) |
|
492 | 490 | files.update( |
|
493 | 491 | set( |
|
494 | 492 | itertools.chain( |
|
495 | 493 | stat.added, stat.modified, stat.clean, stat.unknown |
|
496 | 494 | ) |
|
497 | 495 | ) |
|
498 | 496 | ) |
|
499 | 497 | return files |
|
500 | 498 | |
|
501 | 499 | |
|
502 | 500 | def lineranges(opts, path, basepaths, basectxs, fixctx, content2): |
|
503 | 501 | """Returns the set of line ranges that should be fixed in a file |
|
504 | 502 | |
|
505 | 503 | Of the form [(10, 20), (30, 40)]. |
|
506 | 504 | |
|
507 | 505 | This depends on the given base contexts; we must consider lines that have |
|
508 | 506 | changed versus any of the base contexts, and whether the file has been |
|
509 | 507 | renamed versus any of them. |
|
510 | 508 | |
|
511 | 509 | Another way to understand this is that we exclude line ranges that are |
|
512 | 510 | common to the file in all base contexts. |
|
513 | 511 | """ |
|
514 | 512 | if opts.get(b'whole'): |
|
515 | 513 | # Return a range containing all lines. Rely on the diff implementation's |
|
516 | 514 | # idea of how many lines are in the file, instead of reimplementing it. |
|
517 | 515 | return difflineranges(b'', content2) |
|
518 | 516 | |
|
519 | 517 | rangeslist = [] |
|
520 | 518 | for basectx in basectxs: |
|
521 | 519 | basepath = basepaths.get((basectx.rev(), fixctx.rev(), path), path) |
|
522 | 520 | |
|
523 | 521 | if basepath in basectx: |
|
524 | 522 | content1 = basectx[basepath].data() |
|
525 | 523 | else: |
|
526 | 524 | content1 = b'' |
|
527 | 525 | rangeslist.extend(difflineranges(content1, content2)) |
|
528 | 526 | return unionranges(rangeslist) |
|
529 | 527 | |
|
530 | 528 | |
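
Putting the two helpers defined below (difflineranges and unionranges) together the way lineranges does: with two bases, a line counts as changed if it differs from either one, so the per-base ranges are unioned. A toy run with hypothetical contents::

    content2 = b'A\nb\nC\n'
    base1 = b'a\nb\nC\n'   # line 1 differs from base1
    base2 = b'A\nb\nc\n'   # line 3 differs from base2
    rangeslist = []
    for content1 in (base1, base2):
        rangeslist.extend(difflineranges(content1, content2))
    print(unionranges(rangeslist))  # [(1, 1), (3, 3)]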
|
531 | 529 | def getbasepaths(repo, opts, workqueue, basectxs): |
|
532 | 530 | if opts.get(b'whole'): |
|
533 | 531 | # Base paths will never be fetched for line range determination. |
|
534 | 532 | return {} |
|
535 | 533 | |
|
536 | 534 | basepaths = {} |
|
537 | 535 | for srcrev, path, _dstrevs in workqueue: |
|
538 | 536 | fixctx = repo[srcrev] |
|
539 | 537 | for basectx in basectxs[srcrev]: |
|
540 | 538 | basepath = copies.pathcopies(basectx, fixctx).get(path, path) |
|
541 | 539 | if basepath in basectx: |
|
542 | 540 | basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath |
|
543 | 541 | return basepaths |
|
544 | 542 | |
|
545 | 543 | |
|
546 | 544 | def unionranges(rangeslist): |
|
547 | 545 | """Return the union of some closed intervals |
|
548 | 546 | |
|
549 | 547 | >>> unionranges([]) |
|
550 | 548 | [] |
|
551 | 549 | >>> unionranges([(1, 100)]) |
|
552 | 550 | [(1, 100)] |
|
553 | 551 | >>> unionranges([(1, 100), (1, 100)]) |
|
554 | 552 | [(1, 100)] |
|
555 | 553 | >>> unionranges([(1, 100), (2, 100)]) |
|
556 | 554 | [(1, 100)] |
|
557 | 555 | >>> unionranges([(1, 99), (1, 100)]) |
|
558 | 556 | [(1, 100)] |
|
559 | 557 | >>> unionranges([(1, 100), (40, 60)]) |
|
560 | 558 | [(1, 100)] |
|
561 | 559 | >>> unionranges([(1, 49), (50, 100)]) |
|
562 | 560 | [(1, 100)] |
|
563 | 561 | >>> unionranges([(1, 48), (50, 100)]) |
|
564 | 562 | [(1, 48), (50, 100)] |
|
565 | 563 | >>> unionranges([(1, 2), (3, 4), (5, 6)]) |
|
566 | 564 | [(1, 6)] |
|
567 | 565 | """ |
|
568 | 566 | rangeslist = sorted(set(rangeslist)) |
|
569 | 567 | unioned = [] |
|
570 | 568 | if rangeslist: |
|
571 | 569 | unioned, rangeslist = [rangeslist[0]], rangeslist[1:] |
|
572 | 570 | for a, b in rangeslist: |
|
573 | 571 | c, d = unioned[-1] |
|
574 | 572 | if a > d + 1: |
|
575 | 573 | unioned.append((a, b)) |
|
576 | 574 | else: |
|
577 | 575 | unioned[-1] = (c, max(b, d)) |
|
578 | 576 | return unioned |
|
579 | 577 | |
|
580 | 578 | |
|
581 | 579 | def difflineranges(content1, content2): |
|
582 | 580 | """Return list of line number ranges in content2 that differ from content1. |
|
583 | 581 | |
|
584 | 582 | Line numbers are 1-based. The numbers are the first and last line contained |
|
585 | 583 | in the range. Single-line ranges have the same line number for the first and |
|
586 | 584 | last line. Excludes any empty ranges that result from lines that are only |
|
587 | 585 | present in content1. Relies on mdiff's idea of where the line endings are in |
|
588 | 586 | the string. |
|
589 | 587 | |
|
590 | 588 | >>> from mercurial import pycompat |
|
591 | 589 | >>> lines = lambda s: b'\\n'.join([c for c in pycompat.iterbytestr(s)]) |
|
592 | 590 | >>> difflineranges2 = lambda a, b: difflineranges(lines(a), lines(b)) |
|
593 | 591 | >>> difflineranges2(b'', b'') |
|
594 | 592 | [] |
|
595 | 593 | >>> difflineranges2(b'a', b'') |
|
596 | 594 | [] |
|
597 | 595 | >>> difflineranges2(b'', b'A') |
|
598 | 596 | [(1, 1)] |
|
599 | 597 | >>> difflineranges2(b'a', b'a') |
|
600 | 598 | [] |
|
601 | 599 | >>> difflineranges2(b'a', b'A') |
|
602 | 600 | [(1, 1)] |
|
603 | 601 | >>> difflineranges2(b'ab', b'') |
|
604 | 602 | [] |
|
605 | 603 | >>> difflineranges2(b'', b'AB') |
|
606 | 604 | [(1, 2)] |
|
607 | 605 | >>> difflineranges2(b'abc', b'ac') |
|
608 | 606 | [] |
|
609 | 607 | >>> difflineranges2(b'ab', b'aCb') |
|
610 | 608 | [(2, 2)] |
|
611 | 609 | >>> difflineranges2(b'abc', b'aBc') |
|
612 | 610 | [(2, 2)] |
|
613 | 611 | >>> difflineranges2(b'ab', b'AB') |
|
614 | 612 | [(1, 2)] |
|
615 | 613 | >>> difflineranges2(b'abcde', b'aBcDe') |
|
616 | 614 | [(2, 2), (4, 4)] |
|
617 | 615 | >>> difflineranges2(b'abcde', b'aBCDe') |
|
618 | 616 | [(2, 4)] |
|
619 | 617 | """ |
|
620 | 618 | ranges = [] |
|
621 | 619 | for lines, kind in mdiff.allblocks(content1, content2): |
|
622 | 620 | firstline, lastline = lines[2:4] |
|
623 | 621 | if kind == b'!' and firstline != lastline: |
|
624 | 622 | ranges.append((firstline + 1, lastline)) |
|
625 | 623 | return ranges |
|
626 | 624 | |
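
A rough standard-library analogue of difflineranges() above (difflib standing in for Mercurial's mdiff, so block boundaries may differ on unusual inputs) shows the same conversion from 0-based half-open ranges to 1-based closed ones::

    import difflib

    def changed_line_ranges(old_lines, new_lines):
        """1-based closed (first, last) ranges of lines in new_lines
        that differ from old_lines; empty new-side ranges (pure
        deletions) are skipped, mirroring the firstline != lastline
        check above."""
        ranges = []
        matcher = difflib.SequenceMatcher(None, old_lines, new_lines)
        for tag, i1, i2, j1, j2 in matcher.get_opcodes():
            if tag != 'equal' and j1 != j2:
                ranges.append((j1 + 1, j2))
        return ranges

    print(changed_line_ranges(['a', 'b'], ['a', 'C', 'b']))  # [(2, 2)]
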
|
627 | 625 | |
|
628 | 626 | def getbasectxs(repo, opts, revstofix): |
|
629 | 627 | """Returns a map of the base contexts for each revision |
|
630 | 628 | |
|
631 | 629 | The base contexts determine which lines are considered modified when we |
|
632 | 630 | attempt to fix just the modified lines in a file. It also determines which |
|
633 | 631 | files we attempt to fix, so it is important to compute this even when |
|
634 | 632 | --whole is used. |
|
635 | 633 | """ |
|
636 | 634 | # The --base flag overrides the usual logic, and we give every revision |
|
637 | 635 | # exactly the set of baserevs that the user specified. |
|
638 | 636 | if opts.get(b'base'): |
|
639 | 637 | baserevs = set(logcmdutil.revrange(repo, opts.get(b'base'))) |
|
640 | 638 | if not baserevs: |
|
641 | 639 | baserevs = {nullrev} |
|
642 | 640 | basectxs = {repo[rev] for rev in baserevs} |
|
643 | 641 | return {rev: basectxs for rev in revstofix} |
|
644 | 642 | |
|
645 | 643 | # Proceed in topological order so that we can easily determine each |
|
646 | 644 | # revision's baserevs by looking at its parents and their baserevs. |
|
647 | 645 | basectxs = collections.defaultdict(set) |
|
648 | 646 | for rev in sorted(revstofix): |
|
649 | 647 | ctx = repo[rev] |
|
650 | 648 | for pctx in ctx.parents(): |
|
651 | 649 | if pctx.rev() in basectxs: |
|
652 | 650 | basectxs[rev].update(basectxs[pctx.rev()]) |
|
653 | 651 | else: |
|
654 | 652 | basectxs[rev].add(pctx) |
|
655 | 653 | return basectxs |
|
656 | 654 | |
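
The parent-propagation in getbasectxs() above can be sketched with bare revision numbers in place of contexts (toy DAG, all values illustrative)::

    import collections

    # Toy DAG: rev -> parent revs.  Revisions 2 and 3 are being fixed;
    # rev 1 sits just outside the fixed set, so it becomes the base.
    parents = {2: [1], 3: [2]}
    revstofix = [2, 3]

    basectxs = collections.defaultdict(set)
    for rev in sorted(revstofix):      # ascending revs ~ topological order
        for prev in parents[rev]:
            if prev in basectxs:
                basectxs[rev].update(basectxs[prev])  # inherit parent's bases
            else:
                basectxs[rev].add(prev)               # parent itself is a base

    print(dict(basectxs))              # {2: {1}, 3: {1}}
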
|
657 | 655 | |
|
658 | 656 | def _prefetchfiles(repo, workqueue, basepaths): |
|
659 | 657 | toprefetch = set() |
|
660 | 658 | |
|
661 | 659 | # Prefetch the files that will be fixed. |
|
662 | 660 | for srcrev, path, _dstrevs in workqueue: |
|
663 | 661 | if srcrev == wdirrev: |
|
664 | 662 | continue |
|
665 | 663 | toprefetch.add((srcrev, path)) |
|
666 | 664 | |
|
667 | 665 | # Prefetch the base contents for lineranges(). |
|
668 | 666 | for (baserev, fixrev, path), basepath in basepaths.items(): |
|
669 | 667 | toprefetch.add((baserev, basepath)) |
|
670 | 668 | |
|
671 | 669 | if toprefetch: |
|
672 | 670 | scmutil.prefetchfiles( |
|
673 | 671 | repo, |
|
674 | 672 | [ |
|
675 | 673 | (rev, scmutil.matchfiles(repo, [path])) |
|
676 | 674 | for rev, path in toprefetch |
|
677 | 675 | ], |
|
678 | 676 | ) |
|
679 | 677 | |
|
680 | 678 | |
|
681 | 679 | def fixfile(ui, repo, opts, fixers, fixctx, path, basepaths, basectxs): |
|
682 | 680 | """Run any configured fixers that should affect the file in this context |
|
683 | 681 | |
|
684 | 682 | Returns the file content that results from applying the fixers in some order |
|
685 | 683 | starting with the file's content in the fixctx. Fixers that support line |
|
686 | 684 | ranges will affect lines that have changed relative to any of the basectxs |
|
687 | 685 | (i.e. they will only avoid lines that are common to all basectxs). |
|
688 | 686 | |
|
689 | 687 | A fixer tool's stdout will become the file's new content if and only if it |
|
690 | 688 | exits with code zero. The fixer tool's working directory is the repository's |
|
691 | 689 | root. |
|
692 | 690 | """ |
|
693 | 691 | metadata = {} |
|
694 | 692 | newdata = fixctx[path].data() |
|
695 | for fixername, fixer in pycompat.iteritems(fixers): |

693 | for fixername, fixer in fixers.items(): |
|
696 | 694 | if fixer.affects(opts, fixctx, path): |
|
697 | 695 | ranges = lineranges( |
|
698 | 696 | opts, path, basepaths, basectxs, fixctx, newdata |
|
699 | 697 | ) |
|
700 | 698 | command = fixer.command(ui, path, ranges) |
|
701 | 699 | if command is None: |
|
702 | 700 | continue |
|
703 | 701 | ui.debug(b'subprocess: %s\n' % (command,)) |
|
704 | 702 | proc = subprocess.Popen( |
|
705 | 703 | procutil.tonativestr(command), |
|
706 | 704 | shell=True, |
|
707 | 705 | cwd=procutil.tonativestr(repo.root), |
|
708 | 706 | stdin=subprocess.PIPE, |
|
709 | 707 | stdout=subprocess.PIPE, |
|
710 | 708 | stderr=subprocess.PIPE, |
|
711 | 709 | ) |
|
712 | 710 | stdout, stderr = proc.communicate(newdata) |
|
713 | 711 | if stderr: |
|
714 | 712 | showstderr(ui, fixctx.rev(), fixername, stderr) |
|
715 | 713 | newerdata = stdout |
|
716 | 714 | if fixer.shouldoutputmetadata(): |
|
717 | 715 | try: |
|
718 | 716 | metadatajson, newerdata = stdout.split(b'\0', 1) |
|
719 | 717 | metadata[fixername] = pycompat.json_loads(metadatajson) |
|
720 | 718 | except ValueError: |
|
721 | 719 | ui.warn( |
|
722 | 720 | _(b'ignored invalid output from fixer tool: %s\n') |
|
723 | 721 | % (fixername,) |
|
724 | 722 | ) |
|
725 | 723 | continue |
|
726 | 724 | else: |
|
727 | 725 | metadata[fixername] = None |
|
728 | 726 | if proc.returncode == 0: |
|
729 | 727 | newdata = newerdata |
|
730 | 728 | else: |
|
731 | 729 | if not stderr: |
|
732 | 730 | message = _(b'exited with status %d\n') % (proc.returncode,) |
|
733 | 731 | showstderr(ui, fixctx.rev(), fixername, message) |
|
734 | 732 | checktoolfailureaction( |
|
735 | 733 | ui, |
|
736 | 734 | _(b'no fixes will be applied'), |
|
737 | 735 | hint=_( |
|
738 | 736 | b'use --config fix.failure=continue to apply any ' |
|
739 | 737 | b'successful fixes anyway' |
|
740 | 738 | ), |
|
741 | 739 | ) |
|
742 | 740 | return metadata, newdata |
|
743 | 741 | |
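
The stdout.split(b'\0', 1) parsing in fixfile() above implies a simple output protocol for tools that enable the :metadata suboption; a minimal sketch of what such a fixer might emit (field name hypothetical)::

    import json
    import sys

    # JSON metadata, then a NUL byte, then the fixed file content,
    # matching the stdout.split(b'\0', 1) parsing in fixfile() above.
    metadata = {"fixed_byte_count": 4}
    sys.stdout.buffer.write(json.dumps(metadata).encode('ascii'))
    sys.stdout.buffer.write(b'\0')
    sys.stdout.buffer.write(b'fixed file content\n')
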
|
744 | 742 | |
|
745 | 743 | def showstderr(ui, rev, fixername, stderr): |
|
746 | 744 | """Writes the lines of the stderr string as warnings on the ui |
|
747 | 745 | |
|
748 | 746 | Uses the revision number and fixername to give more context to each line of |
|
749 | 747 | the error message. Doesn't include file names, since those take up a lot of |
|
750 | 748 | space and would tend to be included in the error message if they were |
|
751 | 749 | relevant. |
|
752 | 750 | """ |
|
753 | 751 | for line in re.split(b'[\r\n]+', stderr): |
|
754 | 752 | if line: |
|
755 | 753 | ui.warn(b'[') |
|
756 | 754 | if rev is None: |
|
757 | 755 | ui.warn(_(b'wdir'), label=b'evolve.rev') |
|
758 | 756 | else: |
|
759 | 757 | ui.warn(b'%d' % rev, label=b'evolve.rev') |
|
760 | 758 | ui.warn(b'] %s: %s\n' % (fixername, line)) |
|
761 | 759 | |
|
762 | 760 | |
|
763 | 761 | def writeworkingdir(repo, ctx, filedata, replacements): |
|
764 | 762 | """Write new content to the working copy and check out the new p1 if any |
|
765 | 763 | |
|
766 | 764 | We check out a new revision if and only if we fixed something in both the |
|
767 | 765 | working directory and its parent revision. This avoids the need for a full |
|
768 | 766 | update/merge, and means that the working directory simply isn't affected |
|
769 | 767 | unless the --working-dir flag is given. |
|
770 | 768 | |
|
771 | 769 | Directly updates the dirstate for the affected files. |
|
772 | 770 | """ |
|
773 | for path, data in pycompat.iteritems(filedata): |

771 | for path, data in filedata.items(): |
|
774 | 772 | fctx = ctx[path] |
|
775 | 773 | fctx.write(data, fctx.flags()) |
|
776 | 774 | |
|
777 | 775 | oldp1 = repo.dirstate.p1() |
|
778 | 776 | newp1 = replacements.get(oldp1, oldp1) |
|
779 | 777 | if newp1 != oldp1: |
|
780 | 778 | assert repo.dirstate.p2() == nullid |
|
781 | 779 | with repo.dirstate.parentchange(): |
|
782 | 780 | scmutil.movedirstate(repo, repo[newp1]) |
|
783 | 781 | |
|
784 | 782 | |
|
785 | 783 | def replacerev(ui, repo, ctx, filedata, replacements): |
|
786 | 784 | """Commit a new revision like the given one, but with file content changes |
|
787 | 785 | |
|
788 | 786 | "ctx" is the original revision to be replaced by a modified one. |
|
789 | 787 | |
|
790 | 788 | "filedata" is a dict that maps paths to their new file content. All other |
|
791 | 789 | paths will be recreated from the original revision without changes. |
|
792 | 790 | "filedata" may contain paths that didn't exist in the original revision; |
|
793 | 791 | they will be added. |
|
794 | 792 | |
|
795 | 793 | "replacements" is a dict that maps a single node to a single node, and it is |
|
796 | 794 | updated to indicate the original revision is replaced by the newly created |
|
797 | 795 | one. No entry is added if the replacement's node already exists. |
|
798 | 796 | |
|
799 | 797 | The new revision has the same parents as the old one, unless those parents |
|
800 | 798 | have already been replaced, in which case those replacements are the parents |
|
801 | 799 | of this new revision. Thus, if revisions are replaced in topological order, |
|
802 | 800 | there is no need to rebase them into the original topology later. |
|
803 | 801 | """ |
|
804 | 802 | |
|
805 | 803 | p1rev, p2rev = repo.changelog.parentrevs(ctx.rev()) |
|
806 | 804 | p1ctx, p2ctx = repo[p1rev], repo[p2rev] |
|
807 | 805 | newp1node = replacements.get(p1ctx.node(), p1ctx.node()) |
|
808 | 806 | newp2node = replacements.get(p2ctx.node(), p2ctx.node()) |
|
809 | 807 | |
|
810 | 808 | # We don't want to create a revision that has no changes from the original, |
|
811 | 809 | # but we should if the original revision's parent has been replaced. |
|
812 | 810 | # Otherwise, we would produce an orphan that needs no actual human |
|
813 | 811 | # intervention to evolve. We can't rely on commit() to avoid creating the |
|
814 | 812 | # un-needed revision because the extra field added below produces a new hash |
|
815 | 813 | # regardless of file content changes. |
|
816 | 814 | if ( |
|
817 | 815 | not filedata |
|
818 | 816 | and p1ctx.node() not in replacements |
|
819 | 817 | and p2ctx.node() not in replacements |
|
820 | 818 | ): |
|
821 | 819 | return |
|
822 | 820 | |
|
823 | 821 | extra = ctx.extra().copy() |
|
824 | 822 | extra[b'fix_source'] = ctx.hex() |
|
825 | 823 | |
|
826 | 824 | wctx = context.overlayworkingctx(repo) |
|
827 | 825 | wctx.setbase(repo[newp1node]) |
|
828 | 826 | merge.revert_to(ctx, wc=wctx) |
|
829 | 827 | copies.graftcopies(wctx, ctx, ctx.p1()) |
|
830 | 828 | |
|
831 | 829 | for path in filedata.keys(): |
|
832 | 830 | fctx = ctx[path] |
|
833 | 831 | copysource = fctx.copysource() |
|
834 | 832 | wctx.write(path, filedata[path], flags=fctx.flags()) |
|
835 | 833 | if copysource: |
|
836 | 834 | wctx.markcopied(path, copysource) |
|
837 | 835 | |
|
838 | 836 | desc = rewriteutil.update_hash_refs( |
|
839 | 837 | repo, |
|
840 | 838 | ctx.description(), |
|
841 | 839 | {oldnode: [newnode] for oldnode, newnode in replacements.items()}, |
|
842 | 840 | ) |
|
843 | 841 | |
|
844 | 842 | memctx = wctx.tomemctx( |
|
845 | 843 | text=desc, |
|
846 | 844 | branch=ctx.branch(), |
|
847 | 845 | extra=extra, |
|
848 | 846 | date=ctx.date(), |
|
849 | 847 | parents=(newp1node, newp2node), |
|
850 | 848 | user=ctx.user(), |
|
851 | 849 | ) |
|
852 | 850 | |
|
853 | 851 | sucnode = memctx.commit() |
|
854 | 852 | prenode = ctx.node() |
|
855 | 853 | if prenode == sucnode: |
|
856 | 854 | ui.debug(b'node %s already existed\n' % (ctx.hex())) |
|
857 | 855 | else: |
|
858 | 856 | replacements[ctx.node()] = sucnode |
|
859 | 857 | |
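
The replacement chaining described in the replacerev() docstring can be illustrated in isolation; a toy sketch with string node names (all names hypothetical)::

    # Toy node -> parent map and a replacements map filled in
    # topological order (oldest rewritten first).
    parent = {'B': 'A', 'C': 'B'}
    replacements = {}

    def newparent(node):
        # Parents are looked up through replacements, so a child
        # rewritten after its parent lands on the parent's replacement.
        p = parent[node]
        return replacements.get(p, p)

    print(newparent('B'))          # 'A'  (B's parent was never rewritten)
    replacements['B'] = "B'"
    print(newparent('C'))          # "B'" (C is re-parented onto B-prime)
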
|
860 | 858 | |
|
861 | 859 | def getfixers(ui): |
|
862 | 860 | """Returns a map of configured fixer tools indexed by their names |
|
863 | 861 | |
|
864 | 862 | Each value is a Fixer object with methods that implement the behavior of the |
|
865 | 863 | fixer's config suboptions. Does not validate the config values. |
|
866 | 864 | """ |
|
867 | 865 | fixers = {} |
|
868 | 866 | for name in fixernames(ui): |
|
869 | 867 | enabled = ui.configbool(b'fix', name + b':enabled') |
|
870 | 868 | command = ui.config(b'fix', name + b':command') |
|
871 | 869 | pattern = ui.config(b'fix', name + b':pattern') |
|
872 | 870 | linerange = ui.config(b'fix', name + b':linerange') |
|
873 | 871 | priority = ui.configint(b'fix', name + b':priority') |
|
874 | 872 | metadata = ui.configbool(b'fix', name + b':metadata') |
|
875 | 873 | skipclean = ui.configbool(b'fix', name + b':skipclean') |
|
876 | 874 | # Don't use a fixer if it has no pattern configured. It would be |
|
877 | 875 | # dangerous to let it affect all files. It would be pointless to let it |
|
878 | 876 | # affect no files. There is no reasonable subset of files to use as the |
|
879 | 877 | # default. |
|
880 | 878 | if command is None: |
|
881 | 879 | ui.warn( |
|
882 | 880 | _(b'fixer tool has no command configuration: %s\n') % (name,) |
|
883 | 881 | ) |
|
884 | 882 | elif pattern is None: |
|
885 | 883 | ui.warn( |
|
886 | 884 | _(b'fixer tool has no pattern configuration: %s\n') % (name,) |
|
887 | 885 | ) |
|
888 | 886 | elif not enabled: |
|
889 | 887 | ui.debug(b'ignoring disabled fixer tool: %s\n' % (name,)) |
|
890 | 888 | else: |
|
891 | 889 | fixers[name] = Fixer( |
|
892 | 890 | command, pattern, linerange, priority, metadata, skipclean |
|
893 | 891 | ) |
|
894 | 892 | return collections.OrderedDict( |
|
895 | 893 | sorted(fixers.items(), key=lambda item: item[1]._priority, reverse=True) |
|
896 | 894 | ) |
|
897 | 895 | |
|
898 | 896 | |
|
899 | 897 | def fixernames(ui): |
|
900 | 898 | """Returns the names of [fix] config options that have suboptions""" |
|
901 | 899 | names = set() |
|
902 | 900 | for k, v in ui.configitems(b'fix'): |
|
903 | 901 | if b':' in k: |
|
904 | 902 | names.add(k.split(b':', 1)[0]) |
|
905 | 903 | return names |
|
906 | 904 | |
|
907 | 905 | |
|
908 | 906 | class Fixer(object): |
|
909 | 907 | """Wraps the raw config values for a fixer with methods""" |
|
910 | 908 | |
|
911 | 909 | def __init__( |
|
912 | 910 | self, command, pattern, linerange, priority, metadata, skipclean |
|
913 | 911 | ): |
|
914 | 912 | self._command = command |
|
915 | 913 | self._pattern = pattern |
|
916 | 914 | self._linerange = linerange |
|
917 | 915 | self._priority = priority |
|
918 | 916 | self._metadata = metadata |
|
919 | 917 | self._skipclean = skipclean |
|
920 | 918 | |
|
921 | 919 | def affects(self, opts, fixctx, path): |
|
922 | 920 | """Should this fixer run on the file at the given path and context?""" |
|
923 | 921 | repo = fixctx.repo() |
|
924 | 922 | matcher = matchmod.match( |
|
925 | 923 | repo.root, repo.root, [self._pattern], ctx=fixctx |
|
926 | 924 | ) |
|
927 | 925 | return matcher(path) |
|
928 | 926 | |
|
929 | 927 | def shouldoutputmetadata(self): |
|
930 | 928 | """Should the stdout of this fixer start with JSON and a null byte?""" |
|
931 | 929 | return self._metadata |
|
932 | 930 | |
|
933 | 931 | def command(self, ui, path, ranges): |
|
934 | 932 | """A shell command to use to invoke this fixer on the given file/lines |
|
935 | 933 | |
|
936 | 934 | May return None if there is no appropriate command to run for the given |
|
937 | 935 | parameters. |
|
938 | 936 | """ |
|
939 | 937 | expand = cmdutil.rendercommandtemplate |
|
940 | 938 | parts = [ |
|
941 | 939 | expand( |
|
942 | 940 | ui, |
|
943 | 941 | self._command, |
|
944 | 942 | {b'rootpath': path, b'basename': os.path.basename(path)}, |
|
945 | 943 | ) |
|
946 | 944 | ] |
|
947 | 945 | if self._linerange: |
|
948 | 946 | if self._skipclean and not ranges: |
|
949 | 947 | # No line ranges to fix, so don't run the fixer. |
|
950 | 948 | return None |
|
951 | 949 | for first, last in ranges: |
|
952 | 950 | parts.append( |
|
953 | 951 | expand( |
|
954 | 952 | ui, self._linerange, {b'first': first, b'last': last} |
|
955 | 953 | ) |
|
956 | 954 | ) |
|
957 | 955 | return b' '.join(parts) |
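
For context, the template names rendered by Fixer.command() ({rootpath}, {basename}, {first}, {last}) come from a [fix] configuration shaped like the following (tool name and flags illustrative)::

    [fix]
    clang-format:command = clang-format --assume-filename={rootpath}
    clang-format:linerange = --lines={first}:{last}
    clang-format:pattern = set:**.cpp or **.hpp
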
@@ -1,1004 +1,1000 b'' | |||
|
1 | 1 | # __init__.py - fsmonitor initialization and overrides |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013-2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | Integrates the file-watching program Watchman with Mercurial to produce faster |
|
11 | 11 | status results. |
|
12 | 12 | |
|
13 | 13 | On a particular Linux system, for a real-world repository with over 400,000 |
|
14 | 14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same |
|
15 | 15 | system, with fsmonitor it takes about 0.3 seconds. |
|
16 | 16 | |
|
17 | 17 | fsmonitor requires no configuration -- it will tell Watchman about your |
|
18 | 18 | repository as necessary. You'll need to install Watchman from |
|
19 | 19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. |
|
20 | 20 | |
|
21 | 21 | fsmonitor is incompatible with the largefiles and eol extensions, and |
|
22 | 22 | will disable itself if any of those are active. |
|
23 | 23 | |
|
24 | 24 | The following configuration options exist: |
|
25 | 25 | |
|
26 | 26 | :: |
|
27 | 27 | |
|
28 | 28 | [fsmonitor] |
|
29 | 29 | mode = {off, on, paranoid} |
|
30 | 30 | |
|
31 | 31 | When `mode = off`, fsmonitor will disable itself (similar to not loading the |
|
32 | 32 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). |
|
33 | 33 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, |
|
34 | 34 | and ensure that the results are consistent. |
|
35 | 35 | |
|
36 | 36 | :: |
|
37 | 37 | |
|
38 | 38 | [fsmonitor] |
|
39 | 39 | timeout = (float) |
|
40 | 40 | |
|
41 | 41 | A value, in seconds, that determines how long fsmonitor will wait for Watchman |
|
42 | 42 | to return results. Defaults to `2.0`. |
|
43 | 43 | |
|
44 | 44 | :: |
|
45 | 45 | |
|
46 | 46 | [fsmonitor] |
|
47 | 47 | blacklistusers = (list of userids) |
|
48 | 48 | |
|
49 | 49 | A list of usernames for which fsmonitor will disable itself altogether. |
|
50 | 50 | |
|
51 | 51 | :: |
|
52 | 52 | |
|
53 | 53 | [fsmonitor] |
|
54 | 54 | walk_on_invalidate = (boolean) |
|
55 | 55 | |
|
56 | 56 | Whether or not to walk the whole repo ourselves when our cached state has been |
|
57 | 57 | invalidated, for example when Watchman has been restarted or .hgignore rules |
|
58 | 58 | have been changed. Walking the repo in that case can result in competing for |
|
59 | 59 | I/O with Watchman. For large repos it is recommended to set this value to |
|
60 | 60 | false. You may wish to set this to true if you have a very fast filesystem |
|
61 | 61 | that can outpace the IPC overhead of getting the result data for the full repo |
|
62 | 62 | from Watchman. Defaults to false. |
|
63 | 63 | |
|
64 | 64 | :: |
|
65 | 65 | |
|
66 | 66 | [fsmonitor] |
|
67 | 67 | warn_when_unused = (boolean) |
|
68 | 68 | |
|
69 | 69 | Whether to print a warning during certain operations when fsmonitor would be |
|
70 | 70 | beneficial to performance but isn't enabled. |
|
71 | 71 | |
|
72 | 72 | :: |
|
73 | 73 | |
|
74 | 74 | [fsmonitor] |
|
75 | 75 | warn_update_file_count = (integer) |
|
76 | 76 | # or when mercurial is built with rust support |
|
77 | 77 | warn_update_file_count_rust = (integer) |
|
78 | 78 | |
|
79 | 79 | If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will |
|
80 | 80 | be printed during working directory updates if this many files will be |
|
81 | 81 | created. |
|
82 | 82 | ''' |
|
83 | 83 | |
|
84 | 84 | # Platforms Supported |
|
85 | 85 | # =================== |
|
86 | 86 | # |
|
87 | 87 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, |
|
88 | 88 | # even under severe loads. |
|
89 | 89 | # |
|
90 | 90 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor |
|
91 | 91 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of |
|
92 | 92 | # user testing under normal loads. |
|
93 | 93 | # |
|
94 | 94 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but |
|
95 | 95 | # very little testing has been done. |
|
96 | 96 | # |
|
97 | 97 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. |
|
98 | 98 | # |
|
99 | 99 | # Known Issues |
|
100 | 100 | # ============ |
|
101 | 101 | # |
|
102 | 102 | # * fsmonitor will disable itself if any of the following extensions are |
|
103 | 103 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. |
|
104 | 104 | # * fsmonitor will produce incorrect results if nested repos that are not |
|
105 | 105 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. |
|
106 | 106 | # |
|
107 | 107 | # The issues related to nested repos and subrepos are probably not fundamental |
|
108 | 108 | # ones. Patches to fix them are welcome. |
|
109 | 109 | |
|
110 | 110 | |
|
111 | 111 | import codecs |
|
112 | 112 | import os |
|
113 | 113 | import stat |
|
114 | 114 | import sys |
|
115 | 115 | import tempfile |
|
116 | 116 | import weakref |
|
117 | 117 | |
|
118 | 118 | from mercurial.i18n import _ |
|
119 | 119 | from mercurial.node import hex |
|
120 | 120 | from mercurial.pycompat import open |
|
121 | 121 | from mercurial import ( |
|
122 | 122 | context, |
|
123 | 123 | encoding, |
|
124 | 124 | error, |
|
125 | 125 | extensions, |
|
126 | 126 | localrepo, |
|
127 | 127 | merge, |
|
128 | 128 | pathutil, |
|
129 | 129 | pycompat, |
|
130 | 130 | registrar, |
|
131 | 131 | scmutil, |
|
132 | 132 | util, |
|
133 | 133 | ) |
|
134 | 134 | from mercurial import match as matchmod |
|
135 | 135 | from mercurial.utils import ( |
|
136 | 136 | hashutil, |
|
137 | 137 | stringutil, |
|
138 | 138 | ) |
|
139 | 139 | |
|
140 | 140 | from . import ( |
|
141 | 141 | pywatchman, |
|
142 | 142 | state, |
|
143 | 143 | watchmanclient, |
|
144 | 144 | ) |
|
145 | 145 | |
|
146 | 146 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
147 | 147 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
148 | 148 | # be specifying the version(s) of Mercurial they are tested with, or |
|
149 | 149 | # leave the attribute unspecified. |
|
150 | 150 | testedwith = b'ships-with-hg-core' |
|
151 | 151 | |
|
152 | 152 | configtable = {} |
|
153 | 153 | configitem = registrar.configitem(configtable) |
|
154 | 154 | |
|
155 | 155 | configitem( |
|
156 | 156 | b'fsmonitor', |
|
157 | 157 | b'mode', |
|
158 | 158 | default=b'on', |
|
159 | 159 | ) |
|
160 | 160 | configitem( |
|
161 | 161 | b'fsmonitor', |
|
162 | 162 | b'walk_on_invalidate', |
|
163 | 163 | default=False, |
|
164 | 164 | ) |
|
165 | 165 | configitem( |
|
166 | 166 | b'fsmonitor', |
|
167 | 167 | b'timeout', |
|
168 | 168 | default=b'2', |
|
169 | 169 | ) |
|
170 | 170 | configitem( |
|
171 | 171 | b'fsmonitor', |
|
172 | 172 | b'blacklistusers', |
|
173 | 173 | default=list, |
|
174 | 174 | ) |
|
175 | 175 | configitem( |
|
176 | 176 | b'fsmonitor', |
|
177 | 177 | b'watchman_exe', |
|
178 | 178 | default=b'watchman', |
|
179 | 179 | ) |
|
180 | 180 | configitem( |
|
181 | 181 | b'fsmonitor', |
|
182 | 182 | b'verbose', |
|
183 | 183 | default=True, |
|
184 | 184 | experimental=True, |
|
185 | 185 | ) |
|
186 | 186 | configitem( |
|
187 | 187 | b'experimental', |
|
188 | 188 | b'fsmonitor.transaction_notify', |
|
189 | 189 | default=False, |
|
190 | 190 | ) |
|
191 | 191 | |
|
192 | 192 | # This extension is incompatible with the following blacklisted extensions |
|
193 | 193 | # and will disable itself when encountering one of these: |
|
194 | 194 | _blacklist = [b'largefiles', b'eol'] |
|
195 | 195 | |
|
196 | 196 | |
|
197 | 197 | def debuginstall(ui, fm): |
|
198 | 198 | fm.write( |
|
199 | 199 | b"fsmonitor-watchman", |
|
200 | 200 | _(b"fsmonitor checking for watchman binary... (%s)\n"), |
|
201 | 201 | ui.configpath(b"fsmonitor", b"watchman_exe"), |
|
202 | 202 | ) |
|
203 | 203 | root = tempfile.mkdtemp() |
|
204 | 204 | c = watchmanclient.client(ui, root) |
|
205 | 205 | err = None |
|
206 | 206 | try: |
|
207 | 207 | v = c.command(b"version") |
|
208 | 208 | fm.write( |
|
209 | 209 | b"fsmonitor-watchman-version", |
|
210 | 210 | _(b" watchman binary version %s\n"), |
|
211 | 211 | pycompat.bytestr(v["version"]), |
|
212 | 212 | ) |
|
213 | 213 | except watchmanclient.Unavailable as e: |
|
214 | 214 | err = stringutil.forcebytestr(e) |
|
215 | 215 | fm.condwrite( |
|
216 | 216 | err, |
|
217 | 217 | b"fsmonitor-watchman-error", |
|
218 | 218 | _(b" watchman binary missing or broken: %s\n"), |
|
219 | 219 | err, |
|
220 | 220 | ) |
|
221 | 221 | return 1 if err else 0 |
|
222 | 222 | |
|
223 | 223 | |
|
224 | 224 | def _handleunavailable(ui, state, ex): |
|
225 | 225 | """Exception handler for Watchman interaction exceptions""" |
|
226 | 226 | if isinstance(ex, watchmanclient.Unavailable): |
|
227 | 227 | # experimental config: fsmonitor.verbose |
|
228 | 228 | if ex.warn and ui.configbool(b'fsmonitor', b'verbose'): |
|
229 | 229 | if b'illegal_fstypes' not in stringutil.forcebytestr(ex): |
|
230 | 230 | ui.warn(stringutil.forcebytestr(ex) + b'\n') |
|
231 | 231 | if ex.invalidate: |
|
232 | 232 | state.invalidate() |
|
233 | 233 | # experimental config: fsmonitor.verbose |
|
234 | 234 | if ui.configbool(b'fsmonitor', b'verbose'): |
|
235 | 235 | ui.log( |
|
236 | 236 | b'fsmonitor', |
|
237 | 237 | b'Watchman unavailable: %s\n', |
|
238 | 238 | stringutil.forcebytestr(ex.msg), |
|
239 | 239 | ) |
|
240 | 240 | else: |
|
241 | 241 | ui.log( |
|
242 | 242 | b'fsmonitor', |
|
243 | 243 | b'Watchman exception: %s\n', |
|
244 | 244 | stringutil.forcebytestr(ex), |
|
245 | 245 | ) |
|
246 | 246 | |
|
247 | 247 | |
|
248 | 248 | def _hashignore(ignore): |
|
249 | 249 | """Calculate hash for ignore patterns and filenames |
|
250 | 250 | |
|
251 | 251 | If this information changes between Mercurial invocations, we can't |
|
252 | 252 | rely on Watchman information anymore and have to re-scan the working |
|
253 | 253 | copy. |
|
254 | 254 | |
|
255 | 255 | """ |
|
256 | 256 | sha1 = hashutil.sha1() |
|
257 | 257 | sha1.update(pycompat.byterepr(ignore)) |
|
258 | 258 | return pycompat.sysbytes(sha1.hexdigest()) |
|
259 | 259 | |
|
260 | 260 | |
|
261 | 261 | _watchmanencoding = pywatchman.encoding.get_local_encoding() |
|
262 | 262 | _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding() |
|
263 | 263 | _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding) |
|
264 | 264 | |
|
265 | 265 | |
|
266 | 266 | def _watchmantofsencoding(path): |
|
267 | 267 | """Fix path to match watchman and local filesystem encoding |
|
268 | 268 | |
|
269 | 269 | watchman's paths encoding can differ from filesystem encoding. For example, |
|
270 | 270 | on Windows, it's always utf-8. |
|
271 | 271 | """ |
|
272 | 272 | try: |
|
273 | 273 | decoded = path.decode(_watchmanencoding) |
|
274 | 274 | except UnicodeDecodeError as e: |
|
275 | 275 | raise error.Abort( |
|
276 | 276 | stringutil.forcebytestr(e), hint=b'watchman encoding error' |
|
277 | 277 | ) |
|
278 | 278 | |
|
279 | 279 | try: |
|
280 | 280 | encoded = decoded.encode(_fsencoding, 'strict') |
|
281 | 281 | except UnicodeEncodeError as e: |
|
282 | 282 | raise error.Abort(stringutil.forcebytestr(e)) |
|
283 | 283 | |
|
284 | 284 | return encoded |
|
285 | 285 | |
|
286 | 286 | |
|
287 | 287 | def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
288 | 288 | """Replacement for dirstate.walk, hooking into Watchman. |
|
289 | 289 | |
|
290 | 290 | Whenever full is False, ignored is False, and the Watchman client is |
|
291 | 291 | available, use Watchman combined with saved state to possibly return only a |
|
292 | 292 | subset of files.""" |
|
293 | 293 | |
|
294 | 294 | def bail(reason): |
|
295 | 295 | self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason) |
|
296 | 296 | return orig(match, subrepos, unknown, ignored, full=True) |
|
297 | 297 | |
|
298 | 298 | if full: |
|
299 | 299 | return bail(b'full rewalk requested') |
|
300 | 300 | if ignored: |
|
301 | 301 | return bail(b'listing ignored files') |
|
302 | 302 | if not self._watchmanclient.available(): |
|
303 | 303 | return bail(b'client unavailable') |
|
304 | 304 | state = self._fsmonitorstate |
|
305 | 305 | clock, ignorehash, notefiles = state.get() |
|
306 | 306 | if not clock: |
|
307 | 307 | if state.walk_on_invalidate: |
|
308 | 308 | return bail(b'no clock') |
|
309 | 309 | # Initial NULL clock value, see |
|
310 | 310 | # https://facebook.github.io/watchman/docs/clockspec.html |
|
311 | 311 | clock = b'c:0:0' |
|
312 | 312 | notefiles = [] |
|
313 | 313 | |
|
314 | 314 | ignore = self._ignore |
|
315 | 315 | dirignore = self._dirignore |
|
316 | 316 | if unknown: |
|
317 | 317 | if _hashignore(ignore) != ignorehash and clock != b'c:0:0': |
|
318 | 318 | # ignore list changed -- can't rely on Watchman state any more |
|
319 | 319 | if state.walk_on_invalidate: |
|
320 | 320 | return bail(b'ignore rules changed') |
|
321 | 321 | notefiles = [] |
|
322 | 322 | clock = b'c:0:0' |
|
323 | 323 | else: |
|
324 | 324 | # always ignore |
|
325 | 325 | ignore = util.always |
|
326 | 326 | dirignore = util.always |
|
327 | 327 | |
|
328 | 328 | matchfn = match.matchfn |
|
329 | 329 | matchalways = match.always() |
|
330 | 330 | dmap = self._map |
|
331 | 331 | if util.safehasattr(dmap, b'_map'): |
|
332 | 332 | # for better performance, directly access the inner dirstate map if the |
|
333 | 333 | # standard dirstate implementation is in use. |
|
334 | 334 | dmap = dmap._map |
|
335 | 335 | nonnormalset = { |
|
336 | 336 | f |
|
337 | 337 | for f, e in self._map.items() |
|
338 | 338 | if e.v1_state() != b"n" or e.v1_mtime() == -1 |
|
339 | 339 | } |
|
340 | 340 | |
|
341 | 341 | copymap = self._map.copymap |
|
342 | 342 | getkind = stat.S_IFMT |
|
343 | 343 | dirkind = stat.S_IFDIR |
|
344 | 344 | regkind = stat.S_IFREG |
|
345 | 345 | lnkkind = stat.S_IFLNK |
|
346 | 346 | join = self._join |
|
347 | 347 | normcase = util.normcase |
|
348 | 348 | fresh_instance = False |
|
349 | 349 | |
|
350 | 350 | exact = skipstep3 = False |
|
351 | 351 | if match.isexact(): # match.exact |
|
352 | 352 | exact = True |
|
353 | 353 | dirignore = util.always # skip step 2 |
|
354 | 354 | elif match.prefix(): # match.match, no patterns |
|
355 | 355 | skipstep3 = True |
|
356 | 356 | |
|
357 | 357 | if not exact and self._checkcase: |
|
358 | 358 | # note that even though we could receive directory entries, we're only |
|
359 | 359 | # interested in checking if a file with the same name exists. So only |
|
360 | 360 | # normalize files if possible. |
|
361 | 361 | normalize = self._normalizefile |
|
362 | 362 | skipstep3 = False |
|
363 | 363 | else: |
|
364 | 364 | normalize = None |
|
365 | 365 | |
|
366 | 366 | # step 1: find all explicit files |
|
367 | 367 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
368 | 368 | |
|
369 | 369 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
370 | 370 | work = [d for d in work if not dirignore(d[0])] |
|
371 | 371 | |
|
372 | 372 | if not work and (exact or skipstep3): |
|
373 | 373 | for s in subrepos: |
|
374 | 374 | del results[s] |
|
375 | 375 | del results[b'.hg'] |
|
376 | 376 | return results |
|
377 | 377 | |
|
378 | 378 | # step 2: query Watchman |
|
379 | 379 | try: |
|
380 | 380 | # Use the user-configured timeout for the query. |
|
381 | 381 | # Add a little slack over the top of the user query to allow for |
|
382 | 382 | # overheads while transferring the data |
|
383 | 383 | self._watchmanclient.settimeout(state.timeout + 0.1) |
|
384 | 384 | result = self._watchmanclient.command( |
|
385 | 385 | b'query', |
|
386 | 386 | { |
|
387 | 387 | b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'], |
|
388 | 388 | b'since': clock, |
|
389 | 389 | b'expression': [ |
|
390 | 390 | b'not', |
|
391 | 391 | [ |
|
392 | 392 | b'anyof', |
|
393 | 393 | [b'dirname', b'.hg'], |
|
394 | 394 | [b'name', b'.hg', b'wholename'], |
|
395 | 395 | ], |
|
396 | 396 | ], |
|
397 | 397 | b'sync_timeout': int(state.timeout * 1000), |
|
398 | 398 | b'empty_on_fresh_instance': state.walk_on_invalidate, |
|
399 | 399 | }, |
|
400 | 400 | ) |
|
401 | 401 | except Exception as ex: |
|
402 | 402 | _handleunavailable(self._ui, state, ex) |
|
403 | 403 | self._watchmanclient.clearconnection() |
|
404 | 404 | return bail(b'exception during run') |
|
405 | 405 | else: |
|
406 | 406 | # We need to propagate the last observed clock up so that we |
|
407 | 407 | # can use it for our next query |
|
408 | 408 | state.setlastclock(pycompat.sysbytes(result[b'clock'])) |
|
409 | 409 | if result[b'is_fresh_instance']: |
|
410 | 410 | if state.walk_on_invalidate: |
|
411 | 411 | state.invalidate() |
|
412 | 412 | return bail(b'fresh instance') |
|
413 | 413 | fresh_instance = True |
|
414 | 414 | # Ignore any prior noteable files from the state info |
|
415 | 415 | notefiles = [] |
|
416 | 416 | |
|
417 | 417 | # for file paths which require normalization and we encounter a case |
|
418 | 418 | # collision, we store our own foldmap |
|
419 | 419 | if normalize: |
|
420 | 420 | foldmap = {normcase(k): k for k in results} |
|
421 | 421 | |
|
422 | 422 | switch_slashes = pycompat.ossep == b'\\' |
|
423 | 423 | # The order of the results is, strictly speaking, undefined. |
|
424 | 424 | # For case changes on a case insensitive filesystem we may receive |
|
425 | 425 | # two entries, one with exists=True and another with exists=False. |
|
426 | 426 | # The exists=True entries in the same response should be interpreted |
|
427 | 427 | # as being happens-after the exists=False entries due to the way that |
|
428 | 428 | # Watchman tracks files. We use this property to reconcile deletes |
|
429 | 429 | # for name case changes. |
|
430 | 430 | for entry in result[b'files']: |
|
431 | 431 | fname = entry[b'name'] |
|
432 | 432 | |
|
433 | 433 | # Watchman always give us a str. Normalize to bytes on Python 3 |
|
434 | 434 | # using Watchman's encoding, if needed. |
|
435 | 435 | if not isinstance(fname, bytes): |
|
436 | 436 | fname = fname.encode(_watchmanencoding) |
|
437 | 437 | |
|
438 | 438 | if _fixencoding: |
|
439 | 439 | fname = _watchmantofsencoding(fname) |
|
440 | 440 | |
|
441 | 441 | if switch_slashes: |
|
442 | 442 | fname = fname.replace(b'\\', b'/') |
|
443 | 443 | if normalize: |
|
444 | 444 | normed = normcase(fname) |
|
445 | 445 | fname = normalize(fname, True, True) |
|
446 | 446 | foldmap[normed] = fname |
|
447 | 447 | fmode = entry[b'mode'] |
|
448 | 448 | fexists = entry[b'exists'] |
|
449 | 449 | kind = getkind(fmode) |
|
450 | 450 | |
|
451 | 451 | if b'/.hg/' in fname or fname.endswith(b'/.hg'): |
|
452 | 452 | return bail(b'nested-repo-detected') |
|
453 | 453 | |
|
454 | 454 | if not fexists: |
|
455 | 455 | # if marked as deleted and we don't already have a change |
|
456 | 456 | # record, mark it as deleted. If we already have an entry |
|
457 | 457 | # for fname then it was either part of walkexplicit or was |
|
458 | 458 | # an earlier result that was a case change |
|
459 | 459 | if ( |
|
460 | 460 | fname not in results |
|
461 | 461 | and fname in dmap |
|
462 | 462 | and (matchalways or matchfn(fname)) |
|
463 | 463 | ): |
|
464 | 464 | results[fname] = None |
|
465 | 465 | elif kind == dirkind: |
|
466 | 466 | if fname in dmap and (matchalways or matchfn(fname)): |
|
467 | 467 | results[fname] = None |
|
468 | 468 | elif kind == regkind or kind == lnkkind: |
|
469 | 469 | if fname in dmap: |
|
470 | 470 | if matchalways or matchfn(fname): |
|
471 | 471 | results[fname] = entry |
|
472 | 472 | elif (matchalways or matchfn(fname)) and not ignore(fname): |
|
473 | 473 | results[fname] = entry |
|
474 | 474 | elif fname in dmap and (matchalways or matchfn(fname)): |
|
475 | 475 | results[fname] = None |
|
476 | 476 | |
|
477 | 477 | # step 3: query notable files we don't already know about |
|
478 | 478 | # XXX try not to iterate over the entire dmap |
|
479 | 479 | if normalize: |
|
480 | 480 | # any notable files that have changed case will already be handled |
|
481 | 481 | # above, so just check membership in the foldmap |
|
482 | 482 | notefiles = { |
|
483 | 483 | normalize(f, True, True) |
|
484 | 484 | for f in notefiles |
|
485 | 485 | if normcase(f) not in foldmap |
|
486 | 486 | } |
|
487 | 487 | visit = { |
|
488 | 488 | f |
|
489 | 489 | for f in notefiles |
|
490 | 490 | if (f not in results and matchfn(f) and (f in dmap or not ignore(f))) |
|
491 | 491 | } |
|
492 | 492 | |
|
493 | 493 | if not fresh_instance: |
|
494 | 494 | if matchalways: |
|
495 | 495 | visit.update(f for f in nonnormalset if f not in results) |
|
496 | 496 | visit.update(f for f in copymap if f not in results) |
|
497 | 497 | else: |
|
498 | 498 | visit.update( |
|
499 | 499 | f for f in nonnormalset if f not in results and matchfn(f) |
|
500 | 500 | ) |
|
501 | 501 | visit.update(f for f in copymap if f not in results and matchfn(f)) |
|
502 | 502 | else: |
|
503 | 503 | if matchalways: |
|
504 | visit.update( | |
|
505 | f for f, st in pycompat.iteritems(dmap) if f not in results | |
|
506 | ) | |
|
504 | visit.update(f for f, st in dmap.items() if f not in results) | |
|
507 | 505 | visit.update(f for f in copymap if f not in results) |
|
508 | 506 | else: |
|
509 | 507 | visit.update( |
|
510 | f | |
|
511 | for f, st in pycompat.iteritems(dmap) | |
|
512 | if f not in results and matchfn(f) | |
|
508 | f for f, st in dmap.items() if f not in results and matchfn(f) | |
|
513 | 509 | ) |
|
514 | 510 | visit.update(f for f in copymap if f not in results and matchfn(f)) |
|
515 | 511 | |
|
516 | 512 | audit = pathutil.pathauditor(self._root, cached=True).check |
|
517 | 513 | auditpass = [f for f in visit if audit(f)] |
|
518 | 514 | auditpass.sort() |
|
519 | 515 | auditfail = visit.difference(auditpass) |
|
520 | 516 | for f in auditfail: |
|
521 | 517 | results[f] = None |
|
522 | 518 | |
|
523 | 519 | nf = iter(auditpass) |
|
524 | 520 | for st in util.statfiles([join(f) for f in auditpass]): |
|
525 | 521 | f = next(nf) |
|
526 | 522 | if st or f in dmap: |
|
527 | 523 | results[f] = st |
|
528 | 524 | |
|
529 | 525 | for s in subrepos: |
|
530 | 526 | del results[s] |
|
531 | 527 | del results[b'.hg'] |
|
532 | 528 | return results |
|
533 | 529 | |
|
534 | 530 | |
|
535 | 531 | def overridestatus( |
|
536 | 532 | orig, |
|
537 | 533 | self, |
|
538 | 534 | node1=b'.', |
|
539 | 535 | node2=None, |
|
540 | 536 | match=None, |
|
541 | 537 | ignored=False, |
|
542 | 538 | clean=False, |
|
543 | 539 | unknown=False, |
|
544 | 540 | listsubrepos=False, |
|
545 | 541 | ): |
|
546 | 542 | listignored = ignored |
|
547 | 543 | listclean = clean |
|
548 | 544 | listunknown = unknown |
|
549 | 545 | |
|
550 | 546 | def _cmpsets(l1, l2): |
|
551 | 547 | try: |
|
552 | 548 | if b'FSMONITOR_LOG_FILE' in encoding.environ: |
|
553 | 549 | fn = encoding.environ[b'FSMONITOR_LOG_FILE'] |
|
554 | 550 | f = open(fn, b'wb') |
|
555 | 551 | else: |
|
556 | 552 | fn = b'fsmonitorfail.log' |
|
557 | 553 | f = self.vfs.open(fn, b'wb') |
|
558 | 554 | except (IOError, OSError): |
|
559 | 555 | self.ui.warn(_(b'warning: unable to write to %s\n') % fn) |
|
560 | 556 | return |
|
561 | 557 | |
|
562 | 558 | try: |
|
563 | 559 | for i, (s1, s2) in enumerate(zip(l1, l2)): |
|
564 | 560 | if set(s1) != set(s2): |
|
565 | 561 | f.write(b'sets at position %d are unequal\n' % i) |
|
566 | 562 | f.write(b'watchman returned: %r\n' % s1) |
|
567 | 563 | f.write(b'stat returned: %r\n' % s2) |
|
568 | 564 | finally: |
|
569 | 565 | f.close() |
|
570 | 566 | |
|
571 | 567 | if isinstance(node1, context.changectx): |
|
572 | 568 | ctx1 = node1 |
|
573 | 569 | else: |
|
574 | 570 | ctx1 = self[node1] |
|
575 | 571 | if isinstance(node2, context.changectx): |
|
576 | 572 | ctx2 = node2 |
|
577 | 573 | else: |
|
578 | 574 | ctx2 = self[node2] |
|
579 | 575 | |
|
580 | 576 | working = ctx2.rev() is None |
|
581 | 577 | parentworking = working and ctx1 == self[b'.'] |
|
582 | 578 | match = match or matchmod.always() |
|
583 | 579 | |
|
584 | 580 | # Maybe we can use this opportunity to update Watchman's state. |
|
585 | 581 | # Mercurial uses workingcommitctx and/or memctx to represent the part of |
|
586 | 582 | # the workingctx that is to be committed. So don't update the state in |
|
587 | 583 | # that case. |
|
588 | 584 | # HG_PENDING is set in the environment when the dirstate is being updated |
|
589 | 585 | # in the middle of a transaction; we must not update our state in that |
|
590 | 586 | # case, or we risk forgetting about changes in the working copy. |
|
591 | 587 | updatestate = ( |
|
592 | 588 | parentworking |
|
593 | 589 | and match.always() |
|
594 | 590 | and not isinstance(ctx2, (context.workingcommitctx, context.memctx)) |
|
595 | 591 | and b'HG_PENDING' not in encoding.environ |
|
596 | 592 | ) |
|
597 | 593 | |
|
598 | 594 | try: |
|
599 | 595 | if self._fsmonitorstate.walk_on_invalidate: |
|
600 | 596 | # Use a short timeout to query the current clock. If that |
|
601 | 597 | # takes too long then we assume that the service will be slow |
|
602 | 598 | # to answer our query. |
|
603 | 599 | # walk_on_invalidate indicates that we prefer to walk the |
|
604 | 600 | # tree ourselves because we can ignore portions that Watchman |
|
605 | 601 | # cannot and we tend to be faster in the warmer buffer cache |
|
606 | 602 | # cases. |
|
607 | 603 | self._watchmanclient.settimeout(0.1) |
|
608 | 604 | else: |
|
609 | 605 | # Give Watchman more time to potentially complete its walk |
|
610 | 606 | # and return the initial clock. In this mode we assume that |
|
611 | 607 | # the filesystem will be slower than parsing a potentially |
|
612 | 608 | # very large Watchman result set. |
|
613 | 609 | self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1) |
|
614 | 610 | startclock = self._watchmanclient.getcurrentclock() |
|
615 | 611 | except Exception as ex: |
|
616 | 612 | self._watchmanclient.clearconnection() |
|
617 | 613 | _handleunavailable(self.ui, self._fsmonitorstate, ex) |
|
618 | 614 | # boo, Watchman failed. bail |
|
619 | 615 | return orig( |
|
620 | 616 | node1, |
|
621 | 617 | node2, |
|
622 | 618 | match, |
|
623 | 619 | listignored, |
|
624 | 620 | listclean, |
|
625 | 621 | listunknown, |
|
626 | 622 | listsubrepos, |
|
627 | 623 | ) |
|
628 | 624 | |
|
629 | 625 | if updatestate: |
|
630 | 626 | # We need info about unknown files. This may make things slower the |
|
631 | 627 | # first time, but whatever. |
|
632 | 628 | stateunknown = True |
|
633 | 629 | else: |
|
634 | 630 | stateunknown = listunknown |
|
635 | 631 | |
|
636 | 632 | if updatestate: |
|
637 | 633 | ps = poststatus(startclock) |
|
638 | 634 | self.addpostdsstatus(ps) |
|
639 | 635 | |
|
640 | 636 | r = orig( |
|
641 | 637 | node1, node2, match, listignored, listclean, stateunknown, listsubrepos |
|
642 | 638 | ) |
|
643 | 639 | modified, added, removed, deleted, unknown, ignored, clean = r |
|
644 | 640 | |
|
645 | 641 | if not listunknown: |
|
646 | 642 | unknown = [] |
|
647 | 643 | |
|
648 | 644 | # don't do paranoid checks if we're not going to query Watchman anyway |
|
649 | 645 | full = listclean or match.traversedir is not None |
|
650 | 646 | if self._fsmonitorstate.mode == b'paranoid' and not full: |
|
651 | 647 | # run status again and fall back to the old walk this time |
|
652 | 648 | self.dirstate._fsmonitordisable = True |
|
653 | 649 | |
|
654 | 650 | # shut the UI up |
|
655 | 651 | quiet = self.ui.quiet |
|
656 | 652 | self.ui.quiet = True |
|
657 | 653 | fout, ferr = self.ui.fout, self.ui.ferr |
|
658 | 654 | self.ui.fout = self.ui.ferr = open(os.devnull, b'wb') |
|
659 | 655 | |
|
660 | 656 | try: |
|
661 | 657 | rv2 = orig( |
|
662 | 658 | node1, |
|
663 | 659 | node2, |
|
664 | 660 | match, |
|
665 | 661 | listignored, |
|
666 | 662 | listclean, |
|
667 | 663 | listunknown, |
|
668 | 664 | listsubrepos, |
|
669 | 665 | ) |
|
670 | 666 | finally: |
|
671 | 667 | self.dirstate._fsmonitordisable = False |
|
672 | 668 | self.ui.quiet = quiet |
|
673 | 669 | self.ui.fout, self.ui.ferr = fout, ferr |
|
674 | 670 | |
|
675 | 671 | # clean isn't tested since it's set to True above |
|
676 | 672 | with self.wlock(): |
|
677 | 673 | _cmpsets( |
|
678 | 674 | [modified, added, removed, deleted, unknown, ignored, clean], |
|
679 | 675 | rv2, |
|
680 | 676 | ) |
|
681 | 677 | modified, added, removed, deleted, unknown, ignored, clean = rv2 |
|
682 | 678 | |
|
683 | 679 | return scmutil.status( |
|
684 | 680 | modified, added, removed, deleted, unknown, ignored, clean |
|
685 | 681 | ) |
|
686 | 682 | |
|
687 | 683 | |
|
688 | 684 | class poststatus(object): |
|
689 | 685 | def __init__(self, startclock): |
|
690 | 686 | self._startclock = pycompat.sysbytes(startclock) |
|
691 | 687 | |
|
692 | 688 | def __call__(self, wctx, status): |
|
693 | 689 | clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock |
|
694 | 690 | hashignore = _hashignore(wctx.repo().dirstate._ignore) |
|
695 | 691 | notefiles = ( |
|
696 | 692 | status.modified |
|
697 | 693 | + status.added |
|
698 | 694 | + status.removed |
|
699 | 695 | + status.deleted |
|
700 | 696 | + status.unknown |
|
701 | 697 | ) |
|
702 | 698 | wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles) |
|
703 | 699 | |
|
704 | 700 | |
|
705 | 701 | def makedirstate(repo, dirstate): |
|
706 | 702 | class fsmonitordirstate(dirstate.__class__): |
|
707 | 703 | def _fsmonitorinit(self, repo): |
|
708 | 704 | # _fsmonitordisable is used in paranoid mode |
|
709 | 705 | self._fsmonitordisable = False |
|
710 | 706 | self._fsmonitorstate = repo._fsmonitorstate |
|
711 | 707 | self._watchmanclient = repo._watchmanclient |
|
712 | 708 | self._repo = weakref.proxy(repo) |
|
713 | 709 | |
|
714 | 710 | def walk(self, *args, **kwargs): |
|
715 | 711 | orig = super(fsmonitordirstate, self).walk |
|
716 | 712 | if self._fsmonitordisable: |
|
717 | 713 | return orig(*args, **kwargs) |
|
718 | 714 | return overridewalk(orig, self, *args, **kwargs) |
|
719 | 715 | |
|
720 | 716 | def rebuild(self, *args, **kwargs): |
|
721 | 717 | self._fsmonitorstate.invalidate() |
|
722 | 718 | return super(fsmonitordirstate, self).rebuild(*args, **kwargs) |
|
723 | 719 | |
|
724 | 720 | def invalidate(self, *args, **kwargs): |
|
725 | 721 | self._fsmonitorstate.invalidate() |
|
726 | 722 | return super(fsmonitordirstate, self).invalidate(*args, **kwargs) |
|
727 | 723 | |
|
728 | 724 | dirstate.__class__ = fsmonitordirstate |
|
729 | 725 | dirstate._fsmonitorinit(repo) |
|
730 | 726 | |
|
731 | 727 | |
|
732 | 728 | def wrapdirstate(orig, self): |
|
733 | 729 | ds = orig(self) |
|
734 | 730 | # only override the dirstate when Watchman is available for the repo |
|
735 | 731 | if util.safehasattr(self, b'_fsmonitorstate'): |
|
736 | 732 | makedirstate(self, ds) |
|
737 | 733 | return ds |
|
738 | 734 | |
|
739 | 735 | |
|
740 | 736 | def extsetup(ui): |
|
741 | 737 | extensions.wrapfilecache( |
|
742 | 738 | localrepo.localrepository, b'dirstate', wrapdirstate |
|
743 | 739 | ) |
|
744 | 740 | if pycompat.isdarwin: |
|
745 | 741 | # An assist for avoiding the dangling-symlink fsevents bug |
|
746 | 742 | extensions.wrapfunction(os, b'symlink', wrapsymlink) |
|
747 | 743 | |
|
748 | 744 | extensions.wrapfunction(merge, b'_update', wrapupdate) |
|
749 | 745 | |
|
750 | 746 | |
|
751 | 747 | def wrapsymlink(orig, source, link_name): |
|
752 | 748 | """if we create a dangling symlink, also touch the parent dir |
|
753 | 749 | to encourage fsevents notifications to work more correctly""" |
|
754 | 750 | try: |
|
755 | 751 | return orig(source, link_name) |
|
756 | 752 | finally: |
|
757 | 753 | try: |
|
758 | 754 | os.utime(os.path.dirname(link_name), None) |
|
759 | 755 | except OSError: |
|
760 | 756 | pass |
|
761 | 757 | |
|
762 | 758 | |
|
763 | 759 | class state_update(object): |
|
764 | 760 | """This context manager is responsible for dispatching the state-enter |
|
765 | 761 | and state-leave signals to the watchman service. The enter and leave |
|
766 | 762 | methods can be invoked manually (for scenarios where context manager |
|
767 | 763 | semantics are not possible). If parameters oldnode and newnode are None, |
|
768 | 764 | they will be populated based on current working copy in enter and |
|
769 | 765 | leave, respectively. Similarly, if the distance is none, it will be |
|
770 | 766 | calculated based on the oldnode and newnode in the leave method.""" |
|
771 | 767 | |
|
772 | 768 | def __init__( |
|
773 | 769 | self, |
|
774 | 770 | repo, |
|
775 | 771 | name, |
|
776 | 772 | oldnode=None, |
|
777 | 773 | newnode=None, |
|
778 | 774 | distance=None, |
|
779 | 775 | partial=False, |
|
780 | 776 | ): |
|
781 | 777 | self.repo = repo.unfiltered() |
|
782 | 778 | self.name = name |
|
783 | 779 | self.oldnode = oldnode |
|
784 | 780 | self.newnode = newnode |
|
785 | 781 | self.distance = distance |
|
786 | 782 | self.partial = partial |
|
787 | 783 | self._lock = None |
|
788 | 784 | self.need_leave = False |
|
789 | 785 | |
|
790 | 786 | def __enter__(self): |
|
791 | 787 | self.enter() |
|
792 | 788 | |
|
793 | 789 | def enter(self): |
|
794 | 790 | # Make sure we have a wlock prior to sending notifications to watchman. |
|
795 | 791 | # We don't want to race with other actors. In the update case, |
|
796 | 792 | # merge.update is going to take the wlock almost immediately. We are |
|
797 | 793 | # effectively extending the lock around several short sanity checks. |
|
798 | 794 | if self.oldnode is None: |
|
799 | 795 | self.oldnode = self.repo[b'.'].node() |
|
800 | 796 | |
|
801 | 797 | if self.repo.currentwlock() is None: |
|
802 | 798 | if util.safehasattr(self.repo, b'wlocknostateupdate'): |
|
803 | 799 | self._lock = self.repo.wlocknostateupdate() |
|
804 | 800 | else: |
|
805 | 801 | self._lock = self.repo.wlock() |
|
806 | 802 | self.need_leave = self._state(b'state-enter', hex(self.oldnode)) |
|
807 | 803 | return self |
|
808 | 804 | |
|
809 | 805 | def __exit__(self, type_, value, tb): |
|
810 | 806 | abort = True if type_ else False |
|
811 | 807 | self.exit(abort=abort) |
|
812 | 808 | |
|
813 | 809 | def exit(self, abort=False): |
|
814 | 810 | try: |
|
815 | 811 | if self.need_leave: |
|
816 | 812 | status = b'failed' if abort else b'ok' |
|
817 | 813 | if self.newnode is None: |
|
818 | 814 | self.newnode = self.repo[b'.'].node() |
|
819 | 815 | if self.distance is None: |
|
820 | 816 | self.distance = calcdistance( |
|
821 | 817 | self.repo, self.oldnode, self.newnode |
|
822 | 818 | ) |
|
823 | 819 | self._state(b'state-leave', hex(self.newnode), status=status) |
|
824 | 820 | finally: |
|
825 | 821 | self.need_leave = False |
|
826 | 822 | if self._lock: |
|
827 | 823 | self._lock.release() |
|
828 | 824 | |
|
829 | 825 | def _state(self, cmd, commithash, status=b'ok'): |
|
830 | 826 | if not util.safehasattr(self.repo, b'_watchmanclient'): |
|
831 | 827 | return False |
|
832 | 828 | try: |
|
833 | 829 | self.repo._watchmanclient.command( |
|
834 | 830 | cmd, |
|
835 | 831 | { |
|
836 | 832 | b'name': self.name, |
|
837 | 833 | b'metadata': { |
|
838 | 834 | # the target revision |
|
839 | 835 | b'rev': commithash, |
|
840 | 836 | # approximate number of commits between current and target |
|
841 | 837 | b'distance': self.distance if self.distance else 0, |
|
842 | 838 | # success/failure (only really meaningful for state-leave) |
|
843 | 839 | b'status': status, |
|
844 | 840 | # whether the working copy parent is changing |
|
845 | 841 | b'partial': self.partial, |
|
846 | 842 | }, |
|
847 | 843 | }, |
|
848 | 844 | ) |
|
849 | 845 | return True |
|
850 | 846 | except Exception as e: |
|
851 | 847 | # Swallow any errors; fire and forget |
|
852 | 848 | self.repo.ui.log( |
|
853 | 849 | b'watchman', b'Exception %s while running %s\n', e, cmd |
|
854 | 850 | ) |
|
855 | 851 | return False |
|
856 | 852 | |
|
857 | 853 | |
|
858 | 854 | # Estimate the distance between two nodes |
|
859 | 855 | def calcdistance(repo, oldnode, newnode): |
|
860 | 856 | anc = repo.changelog.ancestor(oldnode, newnode) |
|
861 | 857 | ancrev = repo[anc].rev() |
|
862 | 858 | distance = abs(repo[oldnode].rev() - ancrev) + abs( |
|
863 | 859 | repo[newnode].rev() - ancrev |
|
864 | 860 | ) |
|
865 | 861 | return distance |
|
866 | 862 | |
|
867 | 863 | |
|
868 | 864 | # Bracket working copy updates with calls to the watchman state-enter |
|
869 | 865 | # and state-leave commands. This allows clients to perform more intelligent |
|
870 | 866 | # settling during bulk file change scenarios |
|
871 | 867 | # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling |
|
872 | 868 | def wrapupdate( |
|
873 | 869 | orig, |
|
874 | 870 | repo, |
|
875 | 871 | node, |
|
876 | 872 | branchmerge, |
|
877 | 873 | force, |
|
878 | 874 | ancestor=None, |
|
879 | 875 | mergeancestor=False, |
|
880 | 876 | labels=None, |
|
881 | 877 | matcher=None, |
|
882 | 878 | **kwargs |
|
883 | 879 | ): |
|
884 | 880 | |
|
885 | 881 | distance = 0 |
|
886 | 882 | partial = True |
|
887 | 883 | oldnode = repo[b'.'].node() |
|
888 | 884 | newnode = repo[node].node() |
|
889 | 885 | if matcher is None or matcher.always(): |
|
890 | 886 | partial = False |
|
891 | 887 | distance = calcdistance(repo.unfiltered(), oldnode, newnode) |
|
892 | 888 | |
|
893 | 889 | with state_update( |
|
894 | 890 | repo, |
|
895 | 891 | name=b"hg.update", |
|
896 | 892 | oldnode=oldnode, |
|
897 | 893 | newnode=newnode, |
|
898 | 894 | distance=distance, |
|
899 | 895 | partial=partial, |
|
900 | 896 | ): |
|
901 | 897 | return orig( |
|
902 | 898 | repo, |
|
903 | 899 | node, |
|
904 | 900 | branchmerge, |
|
905 | 901 | force, |
|
906 | 902 | ancestor, |
|
907 | 903 | mergeancestor, |
|
908 | 904 | labels, |
|
909 | 905 | matcher, |
|
910 | 906 | **kwargs |
|
911 | 907 | ) |
|
912 | 908 | |
|
913 | 909 | |
|
914 | 910 | def repo_has_depth_one_nested_repo(repo): |
|
915 | 911 | for f in repo.wvfs.listdir(): |
|
916 | 912 | if os.path.isdir(os.path.join(repo.root, f, b'.hg')): |
|
917 | 913 | msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n' |
|
918 | 914 | repo.ui.debug(msg % f) |
|
919 | 915 | return True |
|
920 | 916 | return False |
|
921 | 917 | |
|
922 | 918 | |
|
923 | 919 | def reposetup(ui, repo): |
|
924 | 920 | # We don't work with largefiles or inotify |
|
925 | 921 | exts = extensions.enabled() |
|
926 | 922 | for ext in _blacklist: |
|
927 | 923 | if ext in exts: |
|
928 | 924 | ui.warn( |
|
929 | 925 | _( |
|
930 | 926 | b'The fsmonitor extension is incompatible with the %s ' |
|
931 | 927 | b'extension and has been disabled.\n' |
|
932 | 928 | ) |
|
933 | 929 | % ext |
|
934 | 930 | ) |
|
935 | 931 | return |
|
936 | 932 | |
|
937 | 933 | if repo.local(): |
|
938 | 934 | # We don't work with subrepos either. |
|
939 | 935 | # |
|
940 | 936 | # if repo[None].substate can cause a dirstate parse, which is too |
|
941 | 937 | # slow. Instead, look for a file called hgsubstate, |
|
942 | 938 | if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'): |
|
943 | 939 | return |
|
944 | 940 | |
|
945 | 941 | if repo_has_depth_one_nested_repo(repo): |
|
946 | 942 | return |
|
947 | 943 | |
|
948 | 944 | fsmonitorstate = state.state(repo) |
|
949 | 945 | if fsmonitorstate.mode == b'off': |
|
950 | 946 | return |
|
951 | 947 | |
|
952 | 948 | try: |
|
953 | 949 | client = watchmanclient.client(repo.ui, repo.root) |
|
954 | 950 | except Exception as ex: |
|
955 | 951 | _handleunavailable(ui, fsmonitorstate, ex) |
|
956 | 952 | return |
|
957 | 953 | |
|
958 | 954 | repo._fsmonitorstate = fsmonitorstate |
|
959 | 955 | repo._watchmanclient = client |
|
960 | 956 | |
|
961 | 957 | dirstate, cached = localrepo.isfilecached(repo, b'dirstate') |
|
962 | 958 | if cached: |
|
963 | 959 | # at this point since fsmonitorstate wasn't present, |
|
964 | 960 | # repo.dirstate is not a fsmonitordirstate |
|
965 | 961 | makedirstate(repo, dirstate) |
|
966 | 962 | |
|
967 | 963 | class fsmonitorrepo(repo.__class__): |
|
968 | 964 | def status(self, *args, **kwargs): |
|
969 | 965 | orig = super(fsmonitorrepo, self).status |
|
970 | 966 | return overridestatus(orig, self, *args, **kwargs) |
|
971 | 967 | |
|
972 | 968 | def wlocknostateupdate(self, *args, **kwargs): |
|
973 | 969 | return super(fsmonitorrepo, self).wlock(*args, **kwargs) |
|
974 | 970 | |
|
975 | 971 | def wlock(self, *args, **kwargs): |
|
976 | 972 | l = super(fsmonitorrepo, self).wlock(*args, **kwargs) |
|
977 | 973 | if not ui.configbool( |
|
978 | 974 | b"experimental", b"fsmonitor.transaction_notify" |
|
979 | 975 | ): |
|
980 | 976 | return l |
|
981 | 977 | if l.held != 1: |
|
982 | 978 | return l |
|
983 | 979 | origrelease = l.releasefn |
|
984 | 980 | |
|
985 | 981 | def staterelease(): |
|
986 | 982 | if origrelease: |
|
987 | 983 | origrelease() |
|
988 | 984 | if l.stateupdate: |
|
989 | 985 | l.stateupdate.exit() |
|
990 | 986 | l.stateupdate = None |
|
991 | 987 | |
|
992 | 988 | try: |
|
993 | 989 | l.stateupdate = None |
|
994 | 990 | l.stateupdate = state_update(self, name=b"hg.transaction") |
|
995 | 991 | l.stateupdate.enter() |
|
996 | 992 | l.releasefn = staterelease |
|
997 | 993 | except Exception as e: |
|
998 | 994 | # Swallow any errors; fire and forget |
|
999 | 995 | self.ui.log( |
|
1000 | 996 | b'watchman', b'Exception in state update %s\n', e |
|
1001 | 997 | ) |
|
1002 | 998 | return l |
|
1003 | 999 | |
|
1004 | 1000 | repo.__class__ = fsmonitorrepo |
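The wlock override above chains a new release callback in front of whatever was already installed, so the external watcher sees the whole transaction. A self-contained sketch of that chaining, where Lock and Notifier are hypothetical stand-ins for Mercurial's lock object and fsmonitor's state_update:

class Notifier:
    def enter(self):
        print('state-enter hg.transaction')
    def exit(self):
        print('state-leave hg.transaction')

class Lock:
    def __init__(self):
        self.releasefn = None
    def release(self):
        if self.releasefn:
            self.releasefn()

def attach(lock, notifier):
    orig = lock.releasefn
    def staterelease():
        if orig:
            orig()           # run whatever release hook was already installed
        notifier.exit()      # then signal that the transaction is over
    notifier.enter()         # the transaction starts when the lock is taken
    lock.releasefn = staterelease
    return lock

lock = attach(Lock(), Notifier())
lock.release()               # prints state-leave after any chained hooks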
@@ -1,1269 +1,1269 b''
|
1 | 1 | # githelp.py - Try to map Git commands to Mercurial equivalents. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """try mapping git commands to Mercurial commands |
|
8 | 8 | |
|
9 | 9 | Tries to map a given git command to a Mercurial command: |
|
10 | 10 | |
|
11 | 11 | $ hg githelp -- git checkout master |
|
12 | 12 | hg update master |
|
13 | 13 | |
|
14 | 14 | If an unknown command or parameter combination is detected, an error is |
|
15 | 15 | produced. |
|
16 | 16 | """ |
|
17 | 17 | |
|
18 | 18 | |
|
19 | 19 | import getopt |
|
20 | 20 | import re |
|
21 | 21 | |
|
22 | 22 | from mercurial.i18n import _ |
|
23 | 23 | from mercurial import ( |
|
24 | 24 | encoding, |
|
25 | 25 | error, |
|
26 | 26 | fancyopts, |
|
27 | 27 | pycompat, |
|
28 | 28 | registrar, |
|
29 | 29 | scmutil, |
|
30 | 30 | ) |
|
31 | 31 | from mercurial.utils import procutil |
|
32 | 32 | |
|
33 | 33 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
34 | 34 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
35 | 35 | # be specifying the version(s) of Mercurial they are tested with, or |
|
36 | 36 | # leave the attribute unspecified. |
|
37 | 37 | testedwith = b'ships-with-hg-core' |
|
38 | 38 | |
|
39 | 39 | cmdtable = {} |
|
40 | 40 | command = registrar.command(cmdtable) |
|
41 | 41 | |
|
42 | 42 | |
|
43 | 43 | def convert(s): |
|
44 | 44 | if s.startswith(b"origin/"): |
|
45 | 45 | return s[7:] |
|
46 | 46 | if b'HEAD' in s: |
|
47 | 47 | s = s.replace(b'HEAD', b'.') |
|
48 | 48 | # HEAD~ in git is .~1 in mercurial |
|
49 | 49 | s = re.sub(b'~$', b'~1', s) |
|
50 | 50 | return s |
|
51 | 51 | |
|
52 | 52 | |
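A quick, hedged spot check of convert()'s mapping; this assumes Mercurial's hgext package is importable (otherwise paste the function into a REPL first):

from hgext.githelp import convert

assert convert(b'origin/master') == b'master'  # the origin/ prefix is stripped
assert convert(b'HEAD') == b'.'                # HEAD maps to the working parent
assert convert(b'HEAD~') == b'.~1'             # a bare trailing ~ gains a count
assert convert(b'v1.0') == b'v1.0'             # anything else passes through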
|
53 | 53 | @command( |
|
54 | 54 | b'githelp|git', |
|
55 | 55 | [], |
|
56 | 56 | _(b'hg githelp'), |
|
57 | 57 | helpcategory=command.CATEGORY_HELP, |
|
58 | 58 | helpbasic=True, |
|
59 | 59 | ) |
|
60 | 60 | def githelp(ui, repo, *args, **kwargs): |
|
61 | 61 | """suggests the Mercurial equivalent of the given git command |
|
62 | 62 | |
|
63 | 63 | Usage: hg githelp -- <git command> |
|
64 | 64 | """ |
|
65 | 65 | |
|
66 | 66 | if len(args) == 0 or (len(args) == 1 and args[0] == b'git'): |
|
67 | 67 | raise error.Abort( |
|
68 | 68 | _(b'missing git command - usage: hg githelp -- <git command>') |
|
69 | 69 | ) |
|
70 | 70 | |
|
71 | 71 | if args[0] == b'git': |
|
72 | 72 | args = args[1:] |
|
73 | 73 | |
|
74 | 74 | cmd = args[0] |
|
75 | 75 | if not cmd in gitcommands: |
|
76 | 76 | raise error.Abort(_(b"error: unknown git command %s") % cmd) |
|
77 | 77 | |
|
78 | 78 | ui.pager(b'githelp') |
|
79 | 79 | args = args[1:] |
|
80 | 80 | return gitcommands[cmd](ui, repo, *args, **kwargs) |
|
81 | 81 | |
|
82 | 82 | |
|
83 | 83 | def parseoptions(ui, cmdoptions, args): |
|
84 | 84 | cmdoptions = list(cmdoptions) |
|
85 | 85 | opts = {} |
|
86 | 86 | args = list(args) |
|
87 | 87 | while True: |
|
88 | 88 | try: |
|
89 | 89 | args = fancyopts.fancyopts(list(args), cmdoptions, opts, True) |
|
90 | 90 | break |
|
91 | 91 | except getopt.GetoptError as ex: |
|
92 | 92 | if "requires argument" in ex.msg: |
|
93 | 93 | raise |
|
94 | 94 | if ('--' + ex.opt) in ex.msg: |
|
95 | 95 | flag = b'--' + pycompat.bytestr(ex.opt) |
|
96 | 96 | elif ('-' + ex.opt) in ex.msg: |
|
97 | 97 | flag = b'-' + pycompat.bytestr(ex.opt) |
|
98 | 98 | else: |
|
99 | 99 | raise error.Abort( |
|
100 | 100 | _(b"unknown option %s") % pycompat.bytestr(ex.opt) |
|
101 | 101 | ) |
|
102 | 102 | try: |
|
103 | 103 | args.remove(flag) |
|
104 | 104 | except Exception: |
|
105 | 105 | msg = _(b"unknown option '%s' packed with other options") |
|
106 | 106 | hint = _(b"please try passing the option as its own flag: -%s") |
|
107 | 107 | raise error.Abort( |
|
108 | 108 | msg % pycompat.bytestr(ex.opt), |
|
109 | 109 | hint=hint % pycompat.bytestr(ex.opt), |
|
110 | 110 | ) |
|
111 | 111 | |
|
112 | 112 | ui.warn(_(b"ignoring unknown option %s\n") % flag) |
|
113 | 113 | |
|
114 | 114 | args = list([convert(x) for x in args]) |
|
115 | 115 | opts = dict( |
|
116 | 116 | [ |
|
117 | 117 | (k, convert(v)) if isinstance(v, bytes) else (k, v)

118 | 118 | for k, v in opts.items()
|
119 | 119 | ] |
|
120 | 120 | ) |
|
121 | 121 | |
|
122 | 122 | return args, opts |
|
123 | 123 | |
|
124 | 124 | |
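The retry loop in parseoptions() is the interesting part: on an unknown flag it removes the offending token and parses again instead of aborting. A standalone sketch of that idea using plain getopt; lenient_parse is a hypothetical name, not part of githelp:

import getopt

def lenient_parse(argv, shortopts, longopts):
    argv = list(argv)
    while True:
        try:
            return getopt.getopt(argv, shortopts, longopts)
        except getopt.GetoptError as ex:
            if 'requires argument' in ex.msg:
                raise                          # genuine usage errors still abort
            flag = '--' + ex.opt if ('--' + ex.opt) in ex.msg else '-' + ex.opt
            argv.remove(flag)                  # drop the unknown flag and retry

opts, rest = lenient_parse(['-x', 'file.txt'], 'v', [])
assert opts == [] and rest == ['file.txt']     # the unknown -x was discarded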
|
125 | 125 | class Command(object): |
|
126 | 126 | def __init__(self, name): |
|
127 | 127 | self.name = name |
|
128 | 128 | self.args = [] |
|
129 | 129 | self.opts = {} |
|
130 | 130 | |
|
131 | 131 | def __bytes__(self): |
|
132 | 132 | cmd = b"hg " + self.name |
|
133 | 133 | if self.opts:

134 | 134 | for k, values in sorted(self.opts.items()):
|
135 | 135 | for v in values: |
|
136 | 136 | if v: |
|
137 | 137 | if isinstance(v, int): |
|
138 | 138 | fmt = b' %s %d' |
|
139 | 139 | else: |
|
140 | 140 | fmt = b' %s %s' |
|
141 | 141 | |
|
142 | 142 | cmd += fmt % (k, v) |
|
143 | 143 | else: |
|
144 | 144 | cmd += b" %s" % (k,) |
|
145 | 145 | if self.args: |
|
146 | 146 | cmd += b" " |
|
147 | 147 | cmd += b" ".join(self.args) |
|
148 | 148 | return cmd |
|
149 | 149 | |
|
150 | 150 | __str__ = encoding.strmethod(__bytes__) |
|
151 | 151 | |
|
152 | 152 | def append(self, value): |
|
153 | 153 | self.args.append(value) |
|
154 | 154 | |
|
155 | 155 | def extend(self, values): |
|
156 | 156 | self.args.extend(values) |
|
157 | 157 | |
|
158 | 158 | def __setitem__(self, key, value): |
|
159 | 159 | values = self.opts.setdefault(key, []) |
|
160 | 160 | values.append(value) |
|
161 | 161 | |
|
162 | 162 | def __and__(self, other): |
|
163 | 163 | return AndCommand(self, other) |
|
164 | 164 | |
|
165 | 165 | |
|
166 | 166 | class AndCommand(object): |
|
167 | 167 | def __init__(self, left, right): |
|
168 | 168 | self.left = left |
|
169 | 169 | self.right = right |
|
170 | 170 | |
|
171 | 171 | def __str__(self): |
|
172 | 172 | return b"%s && %s" % (self.left, self.right) |
|
173 | 173 | |
|
174 | 174 | def __and__(self, other): |
|
175 | 175 | return AndCommand(self, other) |
|
176 | 176 | |
|
177 | 177 | |
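A short usage sketch for the two builder classes above; the rendering follows __bytes__'s rules (flags sorted, values space-separated), and it assumes hgext is importable:

from hgext.githelp import Command

cmd = Command(b'log')
cmd[b'-l'] = b'3'                 # __setitem__ appends to a per-flag value list
cmd.append(b'myfile')             # positional arguments are joined at the end
assert bytes(cmd) == b'hg log -l 3 myfile'

combined = Command(b'pull') & Command(b'update')
assert b'%s && %s' % (combined.left, combined.right) == b'hg pull && hg update'

Note the second assertion interpolates the halves through Command.__bytes__ (PEP 461), which is exactly what AndCommand's own formatting does internally.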
|
178 | 178 | def add(ui, repo, *args, **kwargs): |
|
179 | 179 | cmdoptions = [ |
|
180 | 180 | (b'A', b'all', None, b''), |
|
181 | 181 | (b'p', b'patch', None, b''), |
|
182 | 182 | ] |
|
183 | 183 | args, opts = parseoptions(ui, cmdoptions, args) |
|
184 | 184 | |
|
185 | 185 | if opts.get(b'patch'): |
|
186 | 186 | ui.status( |
|
187 | 187 | _( |
|
188 | 188 | b"note: Mercurial will commit when complete, " |
|
189 | 189 | b"as there is no staging area in Mercurial\n\n" |
|
190 | 190 | ) |
|
191 | 191 | ) |
|
192 | 192 | cmd = Command(b'commit --interactive') |
|
193 | 193 | else: |
|
194 | 194 | cmd = Command(b"add") |
|
195 | 195 | |
|
196 | 196 | if not opts.get(b'all'): |
|
197 | 197 | cmd.extend(args) |
|
198 | 198 | else: |
|
199 | 199 | ui.status( |
|
200 | 200 | _( |
|
201 | 201 | b"note: use hg addremove to remove files that have " |
|
202 | 202 | b"been deleted\n\n" |
|
203 | 203 | ) |
|
204 | 204 | ) |
|
205 | 205 | |
|
206 | 206 | ui.status((bytes(cmd)), b"\n") |
|
207 | 207 | |
|
208 | 208 | |
|
209 | 209 | def am(ui, repo, *args, **kwargs): |
|
210 | 210 | cmdoptions = [] |
|
211 | 211 | parseoptions(ui, cmdoptions, args) |
|
212 | 212 | cmd = Command(b'import') |
|
213 | 213 | ui.status(bytes(cmd), b"\n") |
|
214 | 214 | |
|
215 | 215 | |
|
216 | 216 | def apply(ui, repo, *args, **kwargs): |
|
217 | 217 | cmdoptions = [ |
|
218 | 218 | (b'p', b'p', int, b''), |
|
219 | 219 | (b'', b'directory', b'', b''), |
|
220 | 220 | ] |
|
221 | 221 | args, opts = parseoptions(ui, cmdoptions, args) |
|
222 | 222 | |
|
223 | 223 | cmd = Command(b'import --no-commit') |
|
224 | 224 | if opts.get(b'p'): |
|
225 | 225 | cmd[b'-p'] = opts.get(b'p') |
|
226 | 226 | if opts.get(b'directory'): |
|
227 | 227 | cmd[b'--prefix'] = opts.get(b'directory') |
|
228 | 228 | cmd.extend(args) |
|
229 | 229 | |
|
230 | 230 | ui.status((bytes(cmd)), b"\n") |
|
231 | 231 | |
|
232 | 232 | |
|
233 | 233 | def bisect(ui, repo, *args, **kwargs): |
|
234 | 234 | ui.status(_(b"see 'hg help bisect' for how to use bisect\n\n")) |
|
235 | 235 | |
|
236 | 236 | |
|
237 | 237 | def blame(ui, repo, *args, **kwargs): |
|
238 | 238 | cmdoptions = [] |
|
239 | 239 | args, opts = parseoptions(ui, cmdoptions, args) |
|
240 | 240 | cmd = Command(b'annotate -udl') |
|
241 | 241 | cmd.extend([convert(v) for v in args]) |
|
242 | 242 | ui.status((bytes(cmd)), b"\n") |
|
243 | 243 | |
|
244 | 244 | |
|
245 | 245 | def branch(ui, repo, *args, **kwargs): |
|
246 | 246 | cmdoptions = [ |
|
247 | 247 | (b'', b'set-upstream', None, b''), |
|
248 | 248 | (b'', b'set-upstream-to', b'', b''), |
|
249 | 249 | (b'd', b'delete', None, b''), |
|
250 | 250 | (b'D', b'delete', None, b''), |
|
251 | 251 | (b'm', b'move', None, b''), |
|
252 | 252 | (b'M', b'move', None, b''), |
|
253 | 253 | ] |
|
254 | 254 | args, opts = parseoptions(ui, cmdoptions, args) |
|
255 | 255 | |
|
256 | 256 | cmd = Command(b"bookmark") |
|
257 | 257 | |
|
258 | 258 | if opts.get(b'set_upstream') or opts.get(b'set_upstream_to'): |
|
259 | 259 | ui.status(_(b"Mercurial has no concept of upstream branches\n")) |
|
260 | 260 | return |
|
261 | 261 | elif opts.get(b'delete'): |
|
262 | 262 | cmd = Command(b"strip") |
|
263 | 263 | for branch in args: |
|
264 | 264 | cmd[b'-B'] = branch |
|
265 | 265 | else: |
|
266 | 266 | cmd[b'-B'] = None |
|
267 | 267 | elif opts.get(b'move'): |
|
268 | 268 | if len(args) > 0: |
|
269 | 269 | if len(args) > 1: |
|
270 | 270 | old = args.pop(0) |
|
271 | 271 | else: |
|
272 | 272 | # shell command to output the active bookmark for the active |
|
273 | 273 | # revision |
|
274 | 274 | old = b'`hg log -T"{activebookmark}" -r .`' |
|
275 | 275 | else: |
|
276 | 276 | raise error.Abort(_(b'missing newbranch argument')) |
|
277 | 277 | new = args[0] |
|
278 | 278 | cmd[b'-m'] = old |
|
279 | 279 | cmd.append(new) |
|
280 | 280 | else: |
|
281 | 281 | if len(args) > 1: |
|
282 | 282 | cmd[b'-r'] = args[1] |
|
283 | 283 | cmd.append(args[0]) |
|
284 | 284 | elif len(args) == 1: |
|
285 | 285 | cmd.append(args[0]) |
|
286 | 286 | ui.status((bytes(cmd)), b"\n") |
|
287 | 287 | |
|
288 | 288 | |
|
289 | 289 | def ispath(repo, string): |
|
290 | 290 | """ |
|
291 | 291 | The first argument to git checkout can either be a revision or a path. Let's |
|
292 | 292 | generally assume it's a revision, unless it's obviously a path. There are |
|
293 | 293 | too many ways to spell revisions in git for us to reasonably catch all of |
|
294 | 294 | them, so let's be conservative. |
|
295 | 295 | """ |
|
296 | 296 | if scmutil.isrevsymbol(repo, string): |
|
297 | 297 | # if it's definitely a revision let's not even check if a file of the |
|
298 | 298 | # same name exists. |
|
299 | 299 | return False |
|
300 | 300 | |
|
301 | 301 | cwd = repo.getcwd() |
|
302 | 302 | if cwd == b'': |
|
303 | 303 | repopath = string |
|
304 | 304 | else: |
|
305 | 305 | repopath = cwd + b'/' + string |
|
306 | 306 | |
|
307 | 307 | exists = repo.wvfs.exists(repopath) |
|
308 | 308 | if exists: |
|
309 | 309 | return True |
|
310 | 310 | |
|
311 | 311 | manifest = repo[b'.'].manifest() |
|
312 | 312 | |
|
313 | 313 | didexist = (repopath in manifest) or manifest.hasdir(repopath) |
|
314 | 314 | |
|
315 | 315 | return didexist |
|
316 | 316 | |
|
317 | 317 | |
|
318 | 318 | def checkout(ui, repo, *args, **kwargs): |
|
319 | 319 | cmdoptions = [ |
|
320 | 320 | (b'b', b'branch', b'', b''), |
|
321 | 321 | (b'B', b'branch', b'', b''), |
|
322 | 322 | (b'f', b'force', None, b''), |
|
323 | 323 | (b'p', b'patch', None, b''), |
|
324 | 324 | ] |
|
325 | 325 | paths = [] |
|
326 | 326 | if b'--' in args: |
|
327 | 327 | sepindex = args.index(b'--') |
|
328 | 328 | paths.extend(args[sepindex + 1 :]) |
|
329 | 329 | args = args[:sepindex] |
|
330 | 330 | |
|
331 | 331 | args, opts = parseoptions(ui, cmdoptions, args) |
|
332 | 332 | |
|
333 | 333 | rev = None |
|
334 | 334 | if args and ispath(repo, args[0]): |
|
335 | 335 | paths = args + paths |
|
336 | 336 | elif args: |
|
337 | 337 | rev = args[0] |
|
338 | 338 | paths = args[1:] + paths |
|
339 | 339 | |
|
340 | 340 | cmd = Command(b'update') |
|
341 | 341 | |
|
342 | 342 | if opts.get(b'force'): |
|
343 | 343 | if paths or rev: |
|
344 | 344 | cmd[b'-C'] = None |
|
345 | 345 | |
|
346 | 346 | if opts.get(b'patch'): |
|
347 | 347 | cmd = Command(b'revert') |
|
348 | 348 | cmd[b'-i'] = None |
|
349 | 349 | |
|
350 | 350 | if opts.get(b'branch'): |
|
351 | 351 | if len(args) == 0: |
|
352 | 352 | cmd = Command(b'bookmark') |
|
353 | 353 | cmd.append(opts.get(b'branch')) |
|
354 | 354 | else: |
|
355 | 355 | cmd.append(args[0]) |
|
356 | 356 | bookcmd = Command(b'bookmark') |
|
357 | 357 | bookcmd.append(opts.get(b'branch')) |
|
358 | 358 | cmd = cmd & bookcmd |
|
359 | 359 | # if there is any path argument supplied, use revert instead of update |
|
360 | 360 | elif len(paths) > 0: |
|
361 | 361 | ui.status(_(b"note: use --no-backup to avoid creating .orig files\n\n")) |
|
362 | 362 | cmd = Command(b'revert') |
|
363 | 363 | if opts.get(b'patch'): |
|
364 | 364 | cmd[b'-i'] = None |
|
365 | 365 | if rev: |
|
366 | 366 | cmd[b'-r'] = rev |
|
367 | 367 | cmd.extend(paths) |
|
368 | 368 | elif rev: |
|
369 | 369 | if opts.get(b'patch'): |
|
370 | 370 | cmd[b'-r'] = rev |
|
371 | 371 | else: |
|
372 | 372 | cmd.append(rev) |
|
373 | 373 | elif opts.get(b'force'): |
|
374 | 374 | cmd = Command(b'revert') |
|
375 | 375 | cmd[b'--all'] = None |
|
376 | 376 | else: |
|
377 | 377 | raise error.Abort(_(b"a commit must be specified")) |
|
378 | 378 | |
|
379 | 379 | ui.status((bytes(cmd)), b"\n") |
|
380 | 380 | |
|
381 | 381 | |
|
382 | 382 | def cherrypick(ui, repo, *args, **kwargs): |
|
383 | 383 | cmdoptions = [ |
|
384 | 384 | (b'', b'continue', None, b''), |
|
385 | 385 | (b'', b'abort', None, b''), |
|
386 | 386 | (b'e', b'edit', None, b''), |
|
387 | 387 | ] |
|
388 | 388 | args, opts = parseoptions(ui, cmdoptions, args) |
|
389 | 389 | |
|
390 | 390 | cmd = Command(b'graft') |
|
391 | 391 | |
|
392 | 392 | if opts.get(b'edit'): |
|
393 | 393 | cmd[b'--edit'] = None |
|
394 | 394 | if opts.get(b'continue'): |
|
395 | 395 | cmd[b'--continue'] = None |
|
396 | 396 | elif opts.get(b'abort'): |
|
397 | 397 | ui.status(_(b"note: hg graft does not have --abort\n\n")) |
|
398 | 398 | return |
|
399 | 399 | else: |
|
400 | 400 | cmd.extend(args) |
|
401 | 401 | |
|
402 | 402 | ui.status((bytes(cmd)), b"\n") |
|
403 | 403 | |
|
404 | 404 | |
|
405 | 405 | def clean(ui, repo, *args, **kwargs): |
|
406 | 406 | cmdoptions = [ |
|
407 | 407 | (b'd', b'd', None, b''), |
|
408 | 408 | (b'f', b'force', None, b''), |
|
409 | 409 | (b'x', b'x', None, b''), |
|
410 | 410 | ] |
|
411 | 411 | args, opts = parseoptions(ui, cmdoptions, args) |
|
412 | 412 | |
|
413 | 413 | cmd = Command(b'purge') |
|
414 | 414 | if opts.get(b'x'): |
|
415 | 415 | cmd[b'--all'] = None |
|
416 | 416 | cmd.extend(args) |
|
417 | 417 | |
|
418 | 418 | ui.status((bytes(cmd)), b"\n") |
|
419 | 419 | |
|
420 | 420 | |
|
421 | 421 | def clone(ui, repo, *args, **kwargs): |
|
422 | 422 | cmdoptions = [ |
|
423 | 423 | (b'', b'bare', None, b''), |
|
424 | 424 | (b'n', b'no-checkout', None, b''), |
|
425 | 425 | (b'b', b'branch', b'', b''), |
|
426 | 426 | ] |
|
427 | 427 | args, opts = parseoptions(ui, cmdoptions, args) |
|
428 | 428 | |
|
429 | 429 | if len(args) == 0: |
|
430 | 430 | raise error.Abort(_(b"a repository to clone must be specified")) |
|
431 | 431 | |
|
432 | 432 | cmd = Command(b'clone') |
|
433 | 433 | cmd.append(args[0]) |
|
434 | 434 | if len(args) > 1: |
|
435 | 435 | cmd.append(args[1]) |
|
436 | 436 | |
|
437 | 437 | if opts.get(b'bare'): |
|
438 | 438 | cmd[b'-U'] = None |
|
439 | 439 | ui.status( |
|
440 | 440 | _( |
|
441 | 441 | b"note: Mercurial does not have bare clones. " |
|
442 | 442 | b"-U will clone the repo without checking out a commit\n\n" |
|
443 | 443 | ) |
|
444 | 444 | ) |
|
445 | 445 | elif opts.get(b'no_checkout'): |
|
446 | 446 | cmd[b'-U'] = None |
|
447 | 447 | |
|
448 | 448 | if opts.get(b'branch'): |
|
449 | 449 | cocmd = Command(b"update") |
|
450 | 450 | cocmd.append(opts.get(b'branch')) |
|
451 | 451 | cmd = cmd & cocmd |
|
452 | 452 | |
|
453 | 453 | ui.status((bytes(cmd)), b"\n") |
|
454 | 454 | |
|
455 | 455 | |
|
456 | 456 | def commit(ui, repo, *args, **kwargs): |
|
457 | 457 | cmdoptions = [ |
|
458 | 458 | (b'a', b'all', None, b''), |
|
459 | 459 | (b'm', b'message', b'', b''), |
|
460 | 460 | (b'p', b'patch', None, b''), |
|
461 | 461 | (b'C', b'reuse-message', b'', b''), |
|
462 | 462 | (b'F', b'file', b'', b''), |
|
463 | 463 | (b'', b'author', b'', b''), |
|
464 | 464 | (b'', b'date', b'', b''), |
|
465 | 465 | (b'', b'amend', None, b''), |
|
466 | 466 | (b'', b'no-edit', None, b''), |
|
467 | 467 | ] |
|
468 | 468 | args, opts = parseoptions(ui, cmdoptions, args) |
|
469 | 469 | |
|
470 | 470 | cmd = Command(b'commit') |
|
471 | 471 | if opts.get(b'patch'): |
|
472 | 472 | cmd = Command(b'commit --interactive') |
|
473 | 473 | |
|
474 | 474 | if opts.get(b'amend'): |
|
475 | 475 | if opts.get(b'no_edit'): |
|
476 | 476 | cmd = Command(b'amend') |
|
477 | 477 | else: |
|
478 | 478 | cmd[b'--amend'] = None |
|
479 | 479 | |
|
480 | 480 | if opts.get(b'reuse_message'): |
|
481 | 481 | cmd[b'-M'] = opts.get(b'reuse_message') |
|
482 | 482 | |
|
483 | 483 | if opts.get(b'message'): |
|
484 | 484 | cmd[b'-m'] = b"'%s'" % (opts.get(b'message'),) |
|
485 | 485 | |
|
486 | 486 | if opts.get(b'all'): |
|
487 | 487 | ui.status( |
|
488 | 488 | _( |
|
489 | 489 | b"note: Mercurial doesn't have a staging area, " |
|
490 | 490 | b"so there is no --all. -A will add and remove files " |
|
491 | 491 | b"for you though.\n\n" |
|
492 | 492 | ) |
|
493 | 493 | ) |
|
494 | 494 | |
|
495 | 495 | if opts.get(b'file'): |
|
496 | 496 | cmd[b'-l'] = opts.get(b'file') |
|
497 | 497 | |
|
498 | 498 | if opts.get(b'author'): |
|
499 | 499 | cmd[b'-u'] = opts.get(b'author') |
|
500 | 500 | |
|
501 | 501 | if opts.get(b'date'): |
|
502 | 502 | cmd[b'-d'] = opts.get(b'date') |
|
503 | 503 | |
|
504 | 504 | cmd.extend(args) |
|
505 | 505 | |
|
506 | 506 | ui.status((bytes(cmd)), b"\n") |
|
507 | 507 | |
|
508 | 508 | |
|
509 | 509 | def deprecated(ui, repo, *args, **kwargs): |
|
510 | 510 | ui.warn( |
|
511 | 511 | _( |
|
512 | 512 | b'this command has been deprecated in the git project, ' |
|
513 | 513 | b'thus isn\'t supported by this tool\n\n' |
|
514 | 514 | ) |
|
515 | 515 | ) |
|
516 | 516 | |
|
517 | 517 | |
|
518 | 518 | def diff(ui, repo, *args, **kwargs): |
|
519 | 519 | cmdoptions = [ |
|
520 | 520 | (b'a', b'all', None, b''), |
|
521 | 521 | (b'', b'cached', None, b''), |
|
522 | 522 | (b'R', b'reverse', None, b''), |
|
523 | 523 | ] |
|
524 | 524 | args, opts = parseoptions(ui, cmdoptions, args) |
|
525 | 525 | |
|
526 | 526 | cmd = Command(b'diff') |
|
527 | 527 | |
|
528 | 528 | if opts.get(b'cached'): |
|
529 | 529 | ui.status( |
|
530 | 530 | _( |
|
531 | 531 | b'note: Mercurial has no concept of a staging area, ' |
|
532 | 532 | b'so --cached does nothing\n\n' |
|
533 | 533 | ) |
|
534 | 534 | ) |
|
535 | 535 | |
|
536 | 536 | if opts.get(b'reverse'): |
|
537 | 537 | cmd[b'--reverse'] = None |
|
538 | 538 | |
|
539 | 539 | for a in list(args): |
|
540 | 540 | args.remove(a) |
|
541 | 541 | try: |
|
542 | 542 | repo.revs(a) |
|
543 | 543 | cmd[b'-r'] = a |
|
544 | 544 | except Exception: |
|
545 | 545 | cmd.append(a) |
|
546 | 546 | |
|
547 | 547 | ui.status((bytes(cmd)), b"\n") |
|
548 | 548 | |
|
549 | 549 | |
|
550 | 550 | def difftool(ui, repo, *args, **kwargs): |
|
551 | 551 | ui.status( |
|
552 | 552 | _( |
|
553 | 553 | b'Mercurial does not enable external difftool by default. You ' |
|
554 | 554 | b'need to enable the extdiff extension in your .hgrc file by adding\n' |
|
555 | 555 | b'extdiff =\n' |
|
556 | 556 | b'to the [extensions] section and then running\n\n' |
|
557 | 557 | b'hg extdiff -p <program>\n\n' |
|
558 | 558 | b'See \'hg help extdiff\' and \'hg help -e extdiff\' for more ' |
|
559 | 559 | b'information.\n' |
|
560 | 560 | ) |
|
561 | 561 | ) |
|
562 | 562 | |
|
563 | 563 | |
|
564 | 564 | def fetch(ui, repo, *args, **kwargs): |
|
565 | 565 | cmdoptions = [ |
|
566 | 566 | (b'', b'all', None, b''), |
|
567 | 567 | (b'f', b'force', None, b''), |
|
568 | 568 | ] |
|
569 | 569 | args, opts = parseoptions(ui, cmdoptions, args) |
|
570 | 570 | |
|
571 | 571 | cmd = Command(b'pull') |
|
572 | 572 | |
|
573 | 573 | if len(args) > 0: |
|
574 | 574 | cmd.append(args[0]) |
|
575 | 575 | if len(args) > 1: |
|
576 | 576 | ui.status( |
|
577 | 577 | _( |
|
578 | 578 | b"note: Mercurial doesn't have refspecs. " |
|
579 | 579 | b"-r can be used to specify which commits you want to " |
|
580 | 580 | b"pull. -B can be used to specify which bookmark you " |
|
581 | 581 | b"want to pull.\n\n" |
|
582 | 582 | ) |
|
583 | 583 | ) |
|
584 | 584 | for v in args[1:]: |
|
585 | 585 | if v in repo._bookmarks: |
|
586 | 586 | cmd[b'-B'] = v |
|
587 | 587 | else: |
|
588 | 588 | cmd[b'-r'] = v |
|
589 | 589 | |
|
590 | 590 | ui.status((bytes(cmd)), b"\n") |
|
591 | 591 | |
|
592 | 592 | |
|
593 | 593 | def grep(ui, repo, *args, **kwargs): |
|
594 | 594 | cmdoptions = [] |
|
595 | 595 | args, opts = parseoptions(ui, cmdoptions, args) |
|
596 | 596 | |
|
597 | 597 | cmd = Command(b'grep') |
|
598 | 598 | |
|
599 | 599 | # For basic usage, git grep and hg grep are the same. They both have the |
|
600 | 600 | # pattern first, followed by paths. |
|
601 | 601 | cmd.extend(args) |
|
602 | 602 | |
|
603 | 603 | ui.status((bytes(cmd)), b"\n") |
|
604 | 604 | |
|
605 | 605 | |
|
606 | 606 | def init(ui, repo, *args, **kwargs): |
|
607 | 607 | cmdoptions = [] |
|
608 | 608 | args, opts = parseoptions(ui, cmdoptions, args) |
|
609 | 609 | |
|
610 | 610 | cmd = Command(b'init') |
|
611 | 611 | |
|
612 | 612 | if len(args) > 0: |
|
613 | 613 | cmd.append(args[0]) |
|
614 | 614 | |
|
615 | 615 | ui.status((bytes(cmd)), b"\n") |
|
616 | 616 | |
|
617 | 617 | |
|
618 | 618 | def log(ui, repo, *args, **kwargs): |
|
619 | 619 | cmdoptions = [ |
|
620 | 620 | (b'', b'follow', None, b''), |
|
621 | 621 | (b'', b'decorate', None, b''), |
|
622 | 622 | (b'n', b'number', b'', b''), |
|
623 | 623 | (b'1', b'1', None, b''), |
|
624 | 624 | (b'', b'pretty', b'', b''), |
|
625 | 625 | (b'', b'format', b'', b''), |
|
626 | 626 | (b'', b'oneline', None, b''), |
|
627 | 627 | (b'', b'stat', None, b''), |
|
628 | 628 | (b'', b'graph', None, b''), |
|
629 | 629 | (b'p', b'patch', None, b''), |
|
630 | 630 | (b'G', b'grep-diff', b'', b''), |
|
631 | 631 | (b'S', b'pickaxe-regex', b'', b''), |
|
632 | 632 | ] |
|
633 | 633 | args, opts = parseoptions(ui, cmdoptions, args) |
|
634 | 634 | grep_pat = opts.get(b'grep_diff') or opts.get(b'pickaxe_regex') |
|
635 | 635 | if grep_pat: |
|
636 | 636 | cmd = Command(b'grep') |
|
637 | 637 | cmd[b'--diff'] = grep_pat |
|
638 | 638 | ui.status(b'%s\n' % bytes(cmd)) |
|
639 | 639 | return |
|
640 | 640 | |
|
641 | 641 | ui.status( |
|
642 | 642 | _( |
|
643 | 643 | b'note: -v prints the entire commit message like Git does. To ' |
|
644 | 644 | b'print just the first line, drop the -v.\n\n' |
|
645 | 645 | ) |
|
646 | 646 | ) |
|
647 | 647 | ui.status( |
|
648 | 648 | _( |
|
649 | 649 | b"note: see hg help revset for information on how to filter " |
|
650 | 650 | b"log output\n\n" |
|
651 | 651 | ) |
|
652 | 652 | ) |
|
653 | 653 | |
|
654 | 654 | cmd = Command(b'log') |
|
655 | 655 | cmd[b'-v'] = None |
|
656 | 656 | |
|
657 | 657 | if opts.get(b'number'): |
|
658 | 658 | cmd[b'-l'] = opts.get(b'number') |
|
659 | 659 | if opts.get(b'1'): |
|
660 | 660 | cmd[b'-l'] = b'1' |
|
661 | 661 | if opts.get(b'stat'): |
|
662 | 662 | cmd[b'--stat'] = None |
|
663 | 663 | if opts.get(b'graph'): |
|
664 | 664 | cmd[b'-G'] = None |
|
665 | 665 | if opts.get(b'patch'): |
|
666 | 666 | cmd[b'-p'] = None |
|
667 | 667 | |
|
668 | 668 | if opts.get(b'pretty') or opts.get(b'format') or opts.get(b'oneline'): |
|
669 | 669 | format = opts.get(b'format', b'') |
|
670 | 670 | if b'format:' in format: |
|
671 | 671 | ui.status( |
|
672 | 672 | _( |
|
673 | 673 | b"note: --format format:??? equates to Mercurial's " |
|
674 | 674 | b"--template. See hg help templates for more info.\n\n" |
|
675 | 675 | ) |
|
676 | 676 | ) |
|
677 | 677 | cmd[b'--template'] = b'???' |
|
678 | 678 | else: |
|
679 | 679 | ui.status( |
|
680 | 680 | _( |
|
681 | 681 | b"note: --pretty/format/oneline equate to Mercurial's " |
|
682 | 682 | b"--style or --template. See hg help templates for " |
|
683 | 683 | b"more info.\n\n" |
|
684 | 684 | ) |
|
685 | 685 | ) |
|
686 | 686 | cmd[b'--style'] = b'???' |
|
687 | 687 | |
|
688 | 688 | if len(args) > 0: |
|
689 | 689 | if b'..' in args[0]: |
|
690 | 690 | since, until = args[0].split(b'..') |
|
691 | 691 | cmd[b'-r'] = b"'%s::%s'" % (since, until) |
|
692 | 692 | del args[0] |
|
693 | 693 | cmd.extend(args) |
|
694 | 694 | |
|
695 | 695 | ui.status((bytes(cmd)), b"\n") |
|
696 | 696 | |
|
697 | 697 | |
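One subtlety worth calling out in the range handling above: git's A..B and the generated revset A::B are not the same set. A::B is the DAG range (descendants of A that are ancestors of B), which matches A..B on linear history but differs across branches. The string transformation itself, checked standalone:

arg = b'v1.0..v2.0'
since, until = arg.split(b'..')
assert b"'%s::%s'" % (since, until) == b"'v1.0::v2.0'"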
|
698 | 698 | def lsfiles(ui, repo, *args, **kwargs): |
|
699 | 699 | cmdoptions = [ |
|
700 | 700 | (b'c', b'cached', None, b''), |
|
701 | 701 | (b'd', b'deleted', None, b''), |
|
702 | 702 | (b'm', b'modified', None, b''), |
|
703 | 703 | (b'o', b'others', None, b''), |
|
704 | 704 | (b'i', b'ignored', None, b''), |
|
705 | 705 | (b's', b'stage', None, b''), |
|
706 | 706 | (b'z', b'_zero', None, b''), |
|
707 | 707 | ] |
|
708 | 708 | args, opts = parseoptions(ui, cmdoptions, args) |
|
709 | 709 | |
|
710 | 710 | if ( |
|
711 | 711 | opts.get(b'modified') |
|
712 | 712 | or opts.get(b'deleted') |
|
713 | 713 | or opts.get(b'others') |
|
714 | 714 | or opts.get(b'ignored') |
|
715 | 715 | ): |
|
716 | 716 | cmd = Command(b'status') |
|
717 | 717 | if opts.get(b'deleted'): |
|
718 | 718 | cmd[b'-d'] = None |
|
719 | 719 | if opts.get(b'modified'): |
|
720 | 720 | cmd[b'-m'] = None |
|
721 | 721 | if opts.get(b'others'): |
|
722 | 722 | cmd[b'-o'] = None |
|
723 | 723 | if opts.get(b'ignored'): |
|
724 | 724 | cmd[b'-i'] = None |
|
725 | 725 | else: |
|
726 | 726 | cmd = Command(b'files') |
|
727 | 727 | if opts.get(b'stage'): |
|
728 | 728 | ui.status( |
|
729 | 729 | _( |
|
730 | 730 | b"note: Mercurial doesn't have a staging area, ignoring " |
|
731 | 731 | b"--stage\n" |
|
732 | 732 | ) |
|
733 | 733 | ) |
|
734 | 734 | if opts.get(b'_zero'): |
|
735 | 735 | cmd[b'-0'] = None |
|
736 | 736 | cmd.append(b'.') |
|
737 | 737 | for include in args: |
|
738 | 738 | cmd[b'-I'] = procutil.shellquote(include) |
|
739 | 739 | |
|
740 | 740 | ui.status((bytes(cmd)), b"\n") |
|
741 | 741 | |
|
742 | 742 | |
|
743 | 743 | def merge(ui, repo, *args, **kwargs): |
|
744 | 744 | cmdoptions = [] |
|
745 | 745 | args, opts = parseoptions(ui, cmdoptions, args) |
|
746 | 746 | |
|
747 | 747 | cmd = Command(b'merge') |
|
748 | 748 | |
|
749 | 749 | if len(args) > 0: |
|
750 | 750 | cmd.append(args[len(args) - 1]) |
|
751 | 751 | |
|
752 | 752 | ui.status((bytes(cmd)), b"\n") |
|
753 | 753 | |
|
754 | 754 | |
|
755 | 755 | def mergebase(ui, repo, *args, **kwargs): |
|
756 | 756 | cmdoptions = [] |
|
757 | 757 | args, opts = parseoptions(ui, cmdoptions, args) |
|
758 | 758 | |
|
759 | 759 | if len(args) != 2: |
|
760 | 760 | args = [b'A', b'B'] |
|
761 | 761 | |
|
762 | 762 | cmd = Command( |
|
763 | 763 | b"log -T '{node}\\n' -r 'ancestor(%s,%s)'" % (args[0], args[1]) |
|
764 | 764 | ) |
|
765 | 765 | |
|
766 | 766 | ui.status( |
|
767 | 767 | _(b'note: ancestors() is part of the revset language\n'), |
|
768 | 768 | _(b"(learn more about revsets with 'hg help revsets')\n\n"), |
|
769 | 769 | ) |
|
770 | 770 | ui.status((bytes(cmd)), b"\n") |
|
771 | 771 | |
|
772 | 772 | |
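The command assembled above pairs a revset with a template; spelled out standalone with placeholder revisions, ancestor(A,B) selects the greatest common ancestor and -T '{node}\n' prints only its hash, the moral equivalent of `git merge-base A B`:

a, b = b'A', b'B'   # placeholder revisions
cmdline = b"log -T '{node}\\n' -r 'ancestor(%s,%s)'" % (a, b)
assert cmdline == b"log -T '{node}\\n' -r 'ancestor(A,B)'"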
|
773 | 773 | def mergetool(ui, repo, *args, **kwargs): |
|
774 | 774 | cmdoptions = [] |
|
775 | 775 | args, opts = parseoptions(ui, cmdoptions, args) |
|
776 | 776 | |
|
777 | 777 | cmd = Command(b"resolve") |
|
778 | 778 | |
|
779 | 779 | if len(args) == 0: |
|
780 | 780 | cmd[b'--all'] = None |
|
781 | 781 | cmd.extend(args) |
|
782 | 782 | ui.status((bytes(cmd)), b"\n") |
|
783 | 783 | |
|
784 | 784 | |
|
785 | 785 | def mv(ui, repo, *args, **kwargs): |
|
786 | 786 | cmdoptions = [ |
|
787 | 787 | (b'f', b'force', None, b''), |
|
788 | 788 | (b'n', b'dry-run', None, b''), |
|
789 | 789 | ] |
|
790 | 790 | args, opts = parseoptions(ui, cmdoptions, args) |
|
791 | 791 | |
|
792 | 792 | cmd = Command(b'mv') |
|
793 | 793 | cmd.extend(args) |
|
794 | 794 | |
|
795 | 795 | if opts.get(b'force'): |
|
796 | 796 | cmd[b'-f'] = None |
|
797 | 797 | if opts.get(b'dry_run'): |
|
798 | 798 | cmd[b'-n'] = None |
|
799 | 799 | |
|
800 | 800 | ui.status((bytes(cmd)), b"\n") |
|
801 | 801 | |
|
802 | 802 | |
|
803 | 803 | def pull(ui, repo, *args, **kwargs): |
|
804 | 804 | cmdoptions = [ |
|
805 | 805 | (b'', b'all', None, b''), |
|
806 | 806 | (b'f', b'force', None, b''), |
|
807 | 807 | (b'r', b'rebase', None, b''), |
|
808 | 808 | ] |
|
809 | 809 | args, opts = parseoptions(ui, cmdoptions, args) |
|
810 | 810 | |
|
811 | 811 | cmd = Command(b'pull') |
|
812 | 812 | cmd[b'--rebase'] = None |
|
813 | 813 | |
|
814 | 814 | if len(args) > 0: |
|
815 | 815 | cmd.append(args[0]) |
|
816 | 816 | if len(args) > 1: |
|
817 | 817 | ui.status( |
|
818 | 818 | _( |
|
819 | 819 | b"note: Mercurial doesn't have refspecs. " |
|
820 | 820 | b"-r can be used to specify which commits you want to " |
|
821 | 821 | b"pull. -B can be used to specify which bookmark you " |
|
822 | 822 | b"want to pull.\n\n" |
|
823 | 823 | ) |
|
824 | 824 | ) |
|
825 | 825 | for v in args[1:]: |
|
826 | 826 | if v in repo._bookmarks: |
|
827 | 827 | cmd[b'-B'] = v |
|
828 | 828 | else: |
|
829 | 829 | cmd[b'-r'] = v |
|
830 | 830 | |
|
831 | 831 | ui.status((bytes(cmd)), b"\n") |
|
832 | 832 | |
|
833 | 833 | |
|
834 | 834 | def push(ui, repo, *args, **kwargs): |
|
835 | 835 | cmdoptions = [ |
|
836 | 836 | (b'', b'all', None, b''), |
|
837 | 837 | (b'f', b'force', None, b''), |
|
838 | 838 | ] |
|
839 | 839 | args, opts = parseoptions(ui, cmdoptions, args) |
|
840 | 840 | |
|
841 | 841 | cmd = Command(b'push') |
|
842 | 842 | |
|
843 | 843 | if len(args) > 0: |
|
844 | 844 | cmd.append(args[0]) |
|
845 | 845 | if len(args) > 1: |
|
846 | 846 | ui.status( |
|
847 | 847 | _( |
|
848 | 848 | b"note: Mercurial doesn't have refspecs. " |
|
849 | 849 | b"-r can be used to specify which commits you want " |
|
850 | 850 | b"to push. -B can be used to specify which bookmark " |
|
851 | 851 | b"you want to push.\n\n" |
|
852 | 852 | ) |
|
853 | 853 | ) |
|
854 | 854 | for v in args[1:]: |
|
855 | 855 | if v in repo._bookmarks: |
|
856 | 856 | cmd[b'-B'] = v |
|
857 | 857 | else: |
|
858 | 858 | cmd[b'-r'] = v |
|
859 | 859 | |
|
860 | 860 | if opts.get(b'force'): |
|
861 | 861 | cmd[b'-f'] = None |
|
862 | 862 | |
|
863 | 863 | ui.status((bytes(cmd)), b"\n") |
|
864 | 864 | |
|
865 | 865 | |
|
866 | 866 | def rebase(ui, repo, *args, **kwargs): |
|
867 | 867 | cmdoptions = [ |
|
868 | 868 | (b'', b'all', None, b''), |
|
869 | 869 | (b'i', b'interactive', None, b''), |
|
870 | 870 | (b'', b'onto', b'', b''), |
|
871 | 871 | (b'', b'abort', None, b''), |
|
872 | 872 | (b'', b'continue', None, b''), |
|
873 | 873 | (b'', b'skip', None, b''), |
|
874 | 874 | ] |
|
875 | 875 | args, opts = parseoptions(ui, cmdoptions, args) |
|
876 | 876 | |
|
877 | 877 | if opts.get(b'interactive'): |
|
878 | 878 | ui.status( |
|
879 | 879 | _( |
|
880 | 880 | b"note: hg histedit does not perform a rebase. " |
|
881 | 881 | b"It just edits history.\n\n" |
|
882 | 882 | ) |
|
883 | 883 | ) |
|
884 | 884 | cmd = Command(b'histedit') |
|
885 | 885 | if len(args) > 0: |
|
886 | 886 | ui.status( |
|
887 | 887 | _( |
|
888 | 888 | b"also note: 'hg histedit' will automatically detect" |
|
889 | 889 | b" your stack, so no second argument is necessary\n\n" |
|
890 | 890 | ) |
|
891 | 891 | ) |
|
892 | 892 | ui.status((bytes(cmd)), b"\n") |
|
893 | 893 | return |
|
894 | 894 | |
|
895 | 895 | if opts.get(b'skip'): |
|
896 | 896 | cmd = Command(b'revert --all -r .') |
|
897 | 897 | ui.status((bytes(cmd)), b"\n") |
|
898 | 898 | |
|
899 | 899 | cmd = Command(b'rebase') |
|
900 | 900 | |
|
901 | 901 | if opts.get(b'continue') or opts.get(b'skip'): |
|
902 | 902 | cmd[b'--continue'] = None |
|
903 | 903 | if opts.get(b'abort'): |
|
904 | 904 | cmd[b'--abort'] = None |
|
905 | 905 | |
|
906 | 906 | if opts.get(b'onto'): |
|
907 | 907 | ui.status( |
|
908 | 908 | _( |
|
909 | 909 | b"note: if you're trying to lift a commit off one branch, " |
|
910 | 910 | b"try hg rebase -d <destination commit> -s <commit to be " |
|
911 | 911 | b"lifted>\n\n" |
|
912 | 912 | ) |
|
913 | 913 | ) |
|
914 | 914 | cmd[b'-d'] = convert(opts.get(b'onto')) |
|
915 | 915 | if len(args) < 2: |
|
916 | 916 | raise error.Abort(_(b"expected format: git rebase --onto X Y Z")) |
|
917 | 917 | cmd[b'-s'] = b"'::%s - ::%s'" % (convert(args[1]), convert(args[0])) |
|
918 | 918 | else: |
|
919 | 919 | if len(args) == 1: |
|
920 | 920 | cmd[b'-d'] = convert(args[0]) |
|
921 | 921 | elif len(args) == 2: |
|
922 | 922 | cmd[b'-d'] = convert(args[0]) |
|
923 | 923 | cmd[b'-b'] = convert(args[1]) |
|
924 | 924 | |
|
925 | 925 | ui.status((bytes(cmd)), b"\n") |
|
926 | 926 | |
|
927 | 927 | |
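The --onto branch above builds its source set from a revset difference; spelled out with placeholder names, '::Z - ::Y' is "ancestors of Z minus ancestors of Y", i.e. git's Y..Z, which then moves onto X via -d:

y, z = b'Y', b'Z'   # placeholders for `git rebase --onto X Y Z`
source = b"'::%s - ::%s'" % (z, y)
assert source == b"'::Z - ::Y'"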
|
928 | 928 | def reflog(ui, repo, *args, **kwargs): |
|
929 | 929 | cmdoptions = [ |
|
930 | 930 | (b'', b'all', None, b''), |
|
931 | 931 | ] |
|
932 | 932 | args, opts = parseoptions(ui, cmdoptions, args) |
|
933 | 933 | |
|
934 | 934 | cmd = Command(b'journal') |
|
935 | 935 | if opts.get(b'all'): |
|
936 | 936 | cmd[b'--all'] = None |
|
937 | 937 | if len(args) > 0: |
|
938 | 938 | cmd.append(args[0]) |
|
939 | 939 | |
|
940 | 940 | ui.status(bytes(cmd), b"\n\n") |
|
941 | 941 | ui.status( |
|
942 | 942 | _( |
|
943 | 943 | b"note: in hg commits can be deleted from repo but we always" |
|
944 | 944 | b" have backups\n" |
|
945 | 945 | ) |
|
946 | 946 | ) |
|
947 | 947 | |
|
948 | 948 | |
|
949 | 949 | def reset(ui, repo, *args, **kwargs): |
|
950 | 950 | cmdoptions = [ |
|
951 | 951 | (b'', b'soft', None, b''), |
|
952 | 952 | (b'', b'hard', None, b''), |
|
953 | 953 | (b'', b'mixed', None, b''), |
|
954 | 954 | ] |
|
955 | 955 | args, opts = parseoptions(ui, cmdoptions, args) |
|
956 | 956 | |
|
957 | 957 | commit = convert(args[0] if len(args) > 0 else b'.') |
|
958 | 958 | hard = opts.get(b'hard') |
|
959 | 959 | |
|
960 | 960 | if opts.get(b'mixed'): |
|
961 | 961 | ui.status( |
|
962 | 962 | _( |
|
963 | 963 | b'note: --mixed has no meaning since Mercurial has no ' |
|
964 | 964 | b'staging area\n\n' |
|
965 | 965 | ) |
|
966 | 966 | ) |
|
967 | 967 | if opts.get(b'soft'): |
|
968 | 968 | ui.status( |
|
969 | 969 | _( |
|
970 | 970 | b'note: --soft has no meaning since Mercurial has no ' |
|
971 | 971 | b'staging area\n\n' |
|
972 | 972 | ) |
|
973 | 973 | ) |
|
974 | 974 | |
|
975 | 975 | cmd = Command(b'update') |
|
976 | 976 | if hard: |
|
977 | 977 | cmd.append(b'--clean') |
|
978 | 978 | |
|
979 | 979 | cmd.append(commit) |
|
980 | 980 | |
|
981 | 981 | ui.status((bytes(cmd)), b"\n") |
|
982 | 982 | |
|
983 | 983 | |
|
984 | 984 | def revert(ui, repo, *args, **kwargs): |
|
985 | 985 | cmdoptions = [] |
|
986 | 986 | args, opts = parseoptions(ui, cmdoptions, args) |
|
987 | 987 | |
|
988 | 988 | if len(args) > 1: |
|
989 | 989 | ui.status( |
|
990 | 990 | _( |
|
991 | 991 | b"note: hg backout doesn't support multiple commits at " |
|
992 | 992 | b"once\n\n" |
|
993 | 993 | ) |
|
994 | 994 | ) |
|
995 | 995 | |
|
996 | 996 | cmd = Command(b'backout') |
|
997 | 997 | if args: |
|
998 | 998 | cmd.append(args[0]) |
|
999 | 999 | |
|
1000 | 1000 | ui.status((bytes(cmd)), b"\n") |
|
1001 | 1001 | |
|
1002 | 1002 | |
|
1003 | 1003 | def revparse(ui, repo, *args, **kwargs): |
|
1004 | 1004 | cmdoptions = [ |
|
1005 | 1005 | (b'', b'show-cdup', None, b''), |
|
1006 | 1006 | (b'', b'show-toplevel', None, b''), |
|
1007 | 1007 | ] |
|
1008 | 1008 | args, opts = parseoptions(ui, cmdoptions, args) |
|
1009 | 1009 | |
|
1010 | 1010 | if opts.get(b'show_cdup') or opts.get(b'show_toplevel'): |
|
1011 | 1011 | cmd = Command(b'root') |
|
1012 | 1012 | if opts.get(b'show_cdup'): |
|
1013 | 1013 | ui.status(_(b"note: hg root prints the root of the repository\n\n")) |
|
1014 | 1014 | ui.status((bytes(cmd)), b"\n") |
|
1015 | 1015 | else: |
|
1016 | 1016 | ui.status(_(b"note: see hg help revset for how to refer to commits\n")) |
|
1017 | 1017 | |
|
1018 | 1018 | |
|
1019 | 1019 | def rm(ui, repo, *args, **kwargs): |
|
1020 | 1020 | cmdoptions = [ |
|
1021 | 1021 | (b'f', b'force', None, b''), |
|
1022 | 1022 | (b'n', b'dry-run', None, b''), |
|
1023 | 1023 | ] |
|
1024 | 1024 | args, opts = parseoptions(ui, cmdoptions, args) |
|
1025 | 1025 | |
|
1026 | 1026 | cmd = Command(b'rm') |
|
1027 | 1027 | cmd.extend(args) |
|
1028 | 1028 | |
|
1029 | 1029 | if opts.get(b'force'): |
|
1030 | 1030 | cmd[b'-f'] = None |
|
1031 | 1031 | if opts.get(b'dry_run'): |
|
1032 | 1032 | cmd[b'-n'] = None |
|
1033 | 1033 | |
|
1034 | 1034 | ui.status((bytes(cmd)), b"\n") |
|
1035 | 1035 | |
|
1036 | 1036 | |
|
1037 | 1037 | def show(ui, repo, *args, **kwargs): |
|
1038 | 1038 | cmdoptions = [ |
|
1039 | 1039 | (b'', b'name-status', None, b''), |
|
1040 | 1040 | (b'', b'pretty', b'', b''), |
|
1041 | 1041 | (b'U', b'unified', int, b''), |
|
1042 | 1042 | ] |
|
1043 | 1043 | args, opts = parseoptions(ui, cmdoptions, args) |
|
1044 | 1044 | |
|
1045 | 1045 | if opts.get(b'name_status'): |
|
1046 | 1046 | if opts.get(b'pretty') == b'format:': |
|
1047 | 1047 | cmd = Command(b'status') |
|
1048 | 1048 | cmd[b'--change'] = b'.' |
|
1049 | 1049 | else: |
|
1050 | 1050 | cmd = Command(b'log') |
|
1051 | 1051 | cmd.append(b'--style status') |
|
1052 | 1052 | cmd.append(b'-r .') |
|
1053 | 1053 | elif len(args) > 0: |
|
1054 | 1054 | if ispath(repo, args[0]): |
|
1055 | 1055 | cmd = Command(b'cat') |
|
1056 | 1056 | else: |
|
1057 | 1057 | cmd = Command(b'export') |
|
1058 | 1058 | cmd.extend(args) |
|
1059 | 1059 | if opts.get(b'unified'): |
|
1060 | 1060 | cmd.append(b'--config diff.unified=%d' % (opts[b'unified'],)) |
|
1061 | 1061 | elif opts.get(b'unified'): |
|
1062 | 1062 | cmd = Command(b'export') |
|
1063 | 1063 | cmd.append(b'--config diff.unified=%d' % (opts[b'unified'],)) |
|
1064 | 1064 | else: |
|
1065 | 1065 | cmd = Command(b'export') |
|
1066 | 1066 | |
|
1067 | 1067 | ui.status((bytes(cmd)), b"\n") |
|
1068 | 1068 | |
|
1069 | 1069 | |
|
1070 | 1070 | def stash(ui, repo, *args, **kwargs): |
|
1071 | 1071 | cmdoptions = [ |
|
1072 | 1072 | (b'p', b'patch', None, b''), |
|
1073 | 1073 | ] |
|
1074 | 1074 | args, opts = parseoptions(ui, cmdoptions, args) |
|
1075 | 1075 | |
|
1076 | 1076 | cmd = Command(b'shelve') |
|
1077 | 1077 | action = args[0] if len(args) > 0 else None |
|
1078 | 1078 | |
|
1079 | 1079 | if action == b'list': |
|
1080 | 1080 | cmd[b'-l'] = None |
|
1081 | 1081 | if opts.get(b'patch'): |
|
1082 | 1082 | cmd[b'-p'] = None |
|
1083 | 1083 | elif action == b'show': |
|
1084 | 1084 | if opts.get(b'patch'): |
|
1085 | 1085 | cmd[b'-p'] = None |
|
1086 | 1086 | else: |
|
1087 | 1087 | cmd[b'--stat'] = None |
|
1088 | 1088 | if len(args) > 1: |
|
1089 | 1089 | cmd.append(args[1]) |
|
1090 | 1090 | elif action == b'clear': |
|
1091 | 1091 | cmd[b'--cleanup'] = None |
|
1092 | 1092 | elif action == b'drop': |
|
1093 | 1093 | cmd[b'-d'] = None |
|
1094 | 1094 | if len(args) > 1: |
|
1095 | 1095 | cmd.append(args[1]) |
|
1096 | 1096 | else: |
|
1097 | 1097 | cmd.append(b'<shelve name>') |
|
1098 | 1098 | elif action == b'pop' or action == b'apply': |
|
1099 | 1099 | cmd = Command(b'unshelve') |
|
1100 | 1100 | if len(args) > 1: |
|
1101 | 1101 | cmd.append(args[1]) |
|
1102 | 1102 | if action == b'apply': |
|
1103 | 1103 | cmd[b'--keep'] = None |
|
1104 | 1104 | elif action == b'branch' or action == b'create': |
|
1105 | 1105 | ui.status( |
|
1106 | 1106 | _( |
|
1107 | 1107 | b"note: Mercurial doesn't have equivalents to the " |
|
1108 | 1108 | b"git stash branch or create actions\n\n" |
|
1109 | 1109 | ) |
|
1110 | 1110 | ) |
|
1111 | 1111 | return |
|
1112 | 1112 | else: |
|
1113 | 1113 | if len(args) > 0: |
|
1114 | 1114 | if args[0] != b'save': |
|
1115 | 1115 | cmd[b'--name'] = args[0] |
|
1116 | 1116 | elif len(args) > 1: |
|
1117 | 1117 | cmd[b'--name'] = args[1] |
|
1118 | 1118 | |
|
1119 | 1119 | ui.status((bytes(cmd)), b"\n") |
|
1120 | 1120 | |
|
1121 | 1121 | |
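The action dispatch above reduces to a small table; summarized here as a dict for reference (the drop entry's <shelve name> placeholder mirrors the code's own output, and --patch variants add -p):

STASH_MAP = {
    b'list':  b'shelve -l',           # -p is added when --patch was given
    b'show':  b'shelve --stat',       # or -p with --patch
    b'clear': b'shelve --cleanup',
    b'drop':  b'shelve -d <shelve name>',
    b'pop':   b'unshelve',
    b'apply': b'unshelve --keep',
    None:     b'shelve',              # plain `git stash` / `git stash save`
}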
|
1122 | 1122 | def status(ui, repo, *args, **kwargs): |
|
1123 | 1123 | cmdoptions = [ |
|
1124 | 1124 | (b'', b'ignored', None, b''), |
|
1125 | 1125 | ] |
|
1126 | 1126 | args, opts = parseoptions(ui, cmdoptions, args) |
|
1127 | 1127 | |
|
1128 | 1128 | cmd = Command(b'status') |
|
1129 | 1129 | cmd.extend(args) |
|
1130 | 1130 | |
|
1131 | 1131 | if opts.get(b'ignored'): |
|
1132 | 1132 | cmd[b'-i'] = None |
|
1133 | 1133 | |
|
1134 | 1134 | ui.status((bytes(cmd)), b"\n") |
|
1135 | 1135 | |
|
1136 | 1136 | |
|
1137 | 1137 | def svn(ui, repo, *args, **kwargs): |
|
1138 | 1138 | if not args: |
|
1139 | 1139 | raise error.Abort(_(b'missing svn command')) |
|
1140 | 1140 | svncmd = args[0] |
|
1141 | 1141 | if svncmd not in gitsvncommands: |
|
1142 | 1142 | raise error.Abort(_(b'unknown git svn command "%s"') % svncmd) |
|
1143 | 1143 | |
|
1144 | 1144 | args = args[1:] |
|
1145 | 1145 | return gitsvncommands[svncmd](ui, repo, *args, **kwargs) |
|
1146 | 1146 | |
|
1147 | 1147 | |
|
1148 | 1148 | def svndcommit(ui, repo, *args, **kwargs): |
|
1149 | 1149 | cmdoptions = [] |
|
1150 | 1150 | parseoptions(ui, cmdoptions, args) |
|
1151 | 1151 | |
|
1152 | 1152 | cmd = Command(b'push') |
|
1153 | 1153 | |
|
1154 | 1154 | ui.status((bytes(cmd)), b"\n") |
|
1155 | 1155 | |
|
1156 | 1156 | |
|
1157 | 1157 | def svnfetch(ui, repo, *args, **kwargs): |
|
1158 | 1158 | cmdoptions = [] |
|
1159 | 1159 | parseoptions(ui, cmdoptions, args) |
|
1160 | 1160 | |
|
1161 | 1161 | cmd = Command(b'pull') |
|
1162 | 1162 | cmd.append(b'default-push') |
|
1163 | 1163 | |
|
1164 | 1164 | ui.status((bytes(cmd)), b"\n") |
|
1165 | 1165 | |
|
1166 | 1166 | |
|
1167 | 1167 | def svnfindrev(ui, repo, *args, **kwargs): |
|
1168 | 1168 | cmdoptions = [] |
|
1169 | 1169 | args, opts = parseoptions(ui, cmdoptions, args) |
|
1170 | 1170 | |
|
1171 | 1171 | if not args: |
|
1172 | 1172 | raise error.Abort(_(b'missing find-rev argument')) |
|
1173 | 1173 | |
|
1174 | 1174 | cmd = Command(b'log') |
|
1175 | 1175 | cmd[b'-r'] = args[0] |
|
1176 | 1176 | |
|
1177 | 1177 | ui.status((bytes(cmd)), b"\n") |
|
1178 | 1178 | |
|
1179 | 1179 | |
|
1180 | 1180 | def svnrebase(ui, repo, *args, **kwargs): |
|
1181 | 1181 | cmdoptions = [ |
|
1182 | 1182 | (b'l', b'local', None, b''), |
|
1183 | 1183 | ] |
|
1184 | 1184 | parseoptions(ui, cmdoptions, args) |
|
1185 | 1185 | |
|
1186 | 1186 | pullcmd = Command(b'pull') |
|
1187 | 1187 | pullcmd.append(b'default-push') |
|
1188 | 1188 | rebasecmd = Command(b'rebase') |
|
1189 | 1189 | rebasecmd.append(b'tip') |
|
1190 | 1190 | |
|
1191 | 1191 | cmd = pullcmd & rebasecmd |
|
1192 | 1192 | |
|
1193 | 1193 | ui.status((bytes(cmd)), b"\n") |
|
1194 | 1194 | |
|
1195 | 1195 | |
|
1196 | 1196 | def tag(ui, repo, *args, **kwargs): |
|
1197 | 1197 | cmdoptions = [ |
|
1198 | 1198 | (b'f', b'force', None, b''), |
|
1199 | 1199 | (b'l', b'list', None, b''), |
|
1200 | 1200 | (b'd', b'delete', None, b''), |
|
1201 | 1201 | ] |
|
1202 | 1202 | args, opts = parseoptions(ui, cmdoptions, args) |
|
1203 | 1203 | |
|
1204 | 1204 | if opts.get(b'list'): |
|
1205 | 1205 | cmd = Command(b'tags') |
|
1206 | 1206 | else: |
|
1207 | 1207 | cmd = Command(b'tag') |
|
1208 | 1208 | |
|
1209 | 1209 | if not args: |
|
1210 | 1210 | raise error.Abort(_(b'missing tag argument')) |
|
1211 | 1211 | |
|
1212 | 1212 | cmd.append(args[0]) |
|
1213 | 1213 | if len(args) > 1: |
|
1214 | 1214 | cmd[b'-r'] = args[1] |
|
1215 | 1215 | |
|
1216 | 1216 | if opts.get(b'delete'): |
|
1217 | 1217 | cmd[b'--remove'] = None |
|
1218 | 1218 | |
|
1219 | 1219 | if opts.get(b'force'): |
|
1220 | 1220 | cmd[b'-f'] = None |
|
1221 | 1221 | |
|
1222 | 1222 | ui.status((bytes(cmd)), b"\n") |
|
1223 | 1223 | |
|
1224 | 1224 | |
|
1225 | 1225 | gitcommands = { |
|
1226 | 1226 | b'add': add, |
|
1227 | 1227 | b'am': am, |
|
1228 | 1228 | b'apply': apply, |
|
1229 | 1229 | b'bisect': bisect, |
|
1230 | 1230 | b'blame': blame, |
|
1231 | 1231 | b'branch': branch, |
|
1232 | 1232 | b'checkout': checkout, |
|
1233 | 1233 | b'cherry-pick': cherrypick, |
|
1234 | 1234 | b'clean': clean, |
|
1235 | 1235 | b'clone': clone, |
|
1236 | 1236 | b'commit': commit, |
|
1237 | 1237 | b'diff': diff, |
|
1238 | 1238 | b'difftool': difftool, |
|
1239 | 1239 | b'fetch': fetch, |
|
1240 | 1240 | b'grep': grep, |
|
1241 | 1241 | b'init': init, |
|
1242 | 1242 | b'log': log, |
|
1243 | 1243 | b'ls-files': lsfiles, |
|
1244 | 1244 | b'merge': merge, |
|
1245 | 1245 | b'merge-base': mergebase, |
|
1246 | 1246 | b'mergetool': mergetool, |
|
1247 | 1247 | b'mv': mv, |
|
1248 | 1248 | b'pull': pull, |
|
1249 | 1249 | b'push': push, |
|
1250 | 1250 | b'rebase': rebase, |
|
1251 | 1251 | b'reflog': reflog, |
|
1252 | 1252 | b'reset': reset, |
|
1253 | 1253 | b'revert': revert, |
|
1254 | 1254 | b'rev-parse': revparse, |
|
1255 | 1255 | b'rm': rm, |
|
1256 | 1256 | b'show': show, |
|
1257 | 1257 | b'stash': stash, |
|
1258 | 1258 | b'status': status, |
|
1259 | 1259 | b'svn': svn, |
|
1260 | 1260 | b'tag': tag, |
|
1261 | 1261 | b'whatchanged': deprecated, |
|
1262 | 1262 | } |
|
1263 | 1263 | |
|
1264 | 1264 | gitsvncommands = { |
|
1265 | 1265 | b'dcommit': svndcommit, |
|
1266 | 1266 | b'fetch': svnfetch, |
|
1267 | 1267 | b'find-rev': svnfindrev, |
|
1268 | 1268 | b'rebase': svnrebase, |
|
1269 | 1269 | } |
@@ -1,387 +1,385 b''
|
1 | 1 | # Minimal support for git commands on an hg repository |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''browse the repository in a graphical way |
|
9 | 9 | |
|
10 | 10 | The hgk extension allows browsing the history of a repository in a |
|
11 | 11 | graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not |
|
12 | 12 | distributed with Mercurial.) |
|
13 | 13 | |
|
14 | 14 | hgk consists of two parts: a Tcl script that does the displaying and |
|
15 | 15 | querying of information, and an extension to Mercurial named hgk.py, |
|
16 | 16 | which provides hooks for hgk to get information. hgk can be found in |
|
17 | 17 | the contrib directory; the extension is shipped in the hgext

18 | 18 | repository and needs to be enabled.
|
19 | 19 | |
|
20 | 20 | The :hg:`view` command will launch the hgk Tcl script. For this command |
|
21 | 21 | to work, hgk must be in your search path. Alternately, you can specify |
|
22 | 22 | the path to hgk in your configuration file:: |
|
23 | 23 | |
|
24 | 24 | [hgk] |
|
25 | 25 | path = /location/of/hgk |
|
26 | 26 | |
|
27 | 27 | hgk can make use of the extdiff extension to visualize revisions. |
|
28 | 28 | Assuming you have already configured the extdiff vdiff command, just add::
|
29 | 29 | |
|
30 | 30 | [hgk] |
|
31 | 31 | vdiff=vdiff |
|
32 | 32 | |
|
33 | 33 | The revisions context menu will now display additional entries to fire

34 | 34 | vdiff on the hovered and selected revisions.
|
35 | 35 | ''' |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | import os |
|
39 | 39 | |
|
40 | 40 | from mercurial.i18n import _ |
|
41 | 41 | from mercurial.node import ( |
|
42 | 42 | nullrev, |
|
43 | 43 | short, |
|
44 | 44 | ) |
|
45 | 45 | from mercurial import ( |
|
46 | 46 | commands, |
|
47 | 47 | obsolete, |
|
48 | 48 | patch, |
|
49 | 49 | pycompat, |
|
50 | 50 | registrar, |
|
51 | 51 | scmutil, |
|
52 | 52 | ) |
|
53 | 53 | |
|
54 | 54 | cmdtable = {} |
|
55 | 55 | command = registrar.command(cmdtable) |
|
56 | 56 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
57 | 57 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
58 | 58 | # be specifying the version(s) of Mercurial they are tested with, or |
|
59 | 59 | # leave the attribute unspecified. |
|
60 | 60 | testedwith = b'ships-with-hg-core' |
|
61 | 61 | |
|
62 | 62 | configtable = {} |
|
63 | 63 | configitem = registrar.configitem(configtable) |
|
64 | 64 | |
|
65 | 65 | configitem( |
|
66 | 66 | b'hgk', |
|
67 | 67 | b'path', |
|
68 | 68 | default=b'hgk', |
|
69 | 69 | ) |
|
70 | 70 | |
|
71 | 71 | |
|
72 | 72 | @command( |
|
73 | 73 | b'debug-diff-tree', |
|
74 | 74 | [ |
|
75 | 75 | (b'p', b'patch', None, _(b'generate patch')), |
|
76 | 76 | (b'r', b'recursive', None, _(b'recursive')), |
|
77 | 77 | (b'P', b'pretty', None, _(b'pretty')), |
|
78 | 78 | (b's', b'stdin', None, _(b'stdin')), |
|
79 | 79 | (b'C', b'copy', None, _(b'detect copies')), |
|
80 | 80 | (b'S', b'search', b"", _(b'search')), |
|
81 | 81 | ], |
|
82 | 82 | b'[OPTION]... NODE1 NODE2 [FILE]...', |
|
83 | 83 | inferrepo=True, |
|
84 | 84 | ) |
|
85 | 85 | def difftree(ui, repo, node1=None, node2=None, *files, **opts): |
|
86 | 86 | """diff trees from two commits""" |
|
87 | 87 | |
|
88 | 88 | def __difftree(repo, node1, node2, files=None): |
|
89 | 89 | assert node2 is not None |
|
90 | 90 | if files is None: |
|
91 | 91 | files = [] |
|
92 | 92 | mmap = repo[node1].manifest() |
|
93 | 93 | mmap2 = repo[node2].manifest() |
|
94 | 94 | m = scmutil.match(repo[node1], files) |
|
95 | 95 | st = repo.status(node1, node2, m) |
|
96 | 96 | empty = short(repo.nullid) |
|
97 | 97 | |
|
98 | 98 | for f in st.modified: |
|
99 | 99 | # TODO get file permissions |
|
100 | 100 | ui.writenoi18n( |
|
101 | 101 | b":100664 100664 %s %s M\t%s\t%s\n" |
|
102 | 102 | % (short(mmap[f]), short(mmap2[f]), f, f) |
|
103 | 103 | ) |
|
104 | 104 | for f in st.added: |
|
105 | 105 | ui.writenoi18n( |
|
106 | 106 | b":000000 100664 %s %s N\t%s\t%s\n" |
|
107 | 107 | % (empty, short(mmap2[f]), f, f) |
|
108 | 108 | ) |
|
109 | 109 | for f in st.removed: |
|
110 | 110 | ui.writenoi18n( |
|
111 | 111 | b":100664 000000 %s %s D\t%s\t%s\n" |
|
112 | 112 | % (short(mmap[f]), empty, f, f) |
|
113 | 113 | ) |
|
114 | 114 | |
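The writes above emit git-diff-tree-style raw lines; a small hypothetical parser makes the field layout explicit (":<oldmode> <newmode> <oldhash> <newhash> <status>", then tab-separated old and new names):

def parse_rawline(line):
    meta, oldname, newname = line.split(b'\t')
    oldmode, newmode, oldhash, newhash, status = meta.lstrip(b':').split(b' ')
    return status, (oldmode, oldhash, oldname), (newmode, newhash, newname)

sample = b":100664 100664 aaaaaaaaaaaa bbbbbbbbbbbb M\tfoo.py\tfoo.py"
status, old, new = parse_rawline(sample)
assert status == b'M' and old[2] == b'foo.py'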
|
115 | 115 | ## |
|
116 | 116 | |
|
117 | 117 | while True: |
|
118 | 118 | if opts['stdin']: |
|
119 | 119 | line = ui.fin.readline() |
|
120 | 120 | if not line: |
|
121 | 121 | break |
|
122 | 122 | line = line.rstrip(pycompat.oslinesep).split(b' ') |
|
123 | 123 | node1 = line[0] |
|
124 | 124 | if len(line) > 1: |
|
125 | 125 | node2 = line[1] |
|
126 | 126 | else: |
|
127 | 127 | node2 = None |
|
128 | 128 | node1 = repo.lookup(node1) |
|
129 | 129 | if node2: |
|
130 | 130 | node2 = repo.lookup(node2) |
|
131 | 131 | else: |
|
132 | 132 | node2 = node1 |
|
133 | 133 | node1 = repo.changelog.parents(node1)[0] |
|
134 | 134 | if opts['patch']: |
|
135 | 135 | if opts['pretty']: |
|
136 | 136 | catcommit(ui, repo, node2, b"") |
|
137 | 137 | m = scmutil.match(repo[node1], files) |
|
138 | 138 | diffopts = patch.difffeatureopts(ui) |
|
139 | 139 | diffopts.git = True |
|
140 | 140 | chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts) |
|
141 | 141 | for chunk in chunks: |
|
142 | 142 | ui.write(chunk) |
|
143 | 143 | else: |
|
144 | 144 | __difftree(repo, node1, node2, files=files) |
|
145 | 145 | if not opts['stdin']: |
|
146 | 146 | break |
|
147 | 147 | |
|
148 | 148 | |
|
149 | 149 | def catcommit(ui, repo, n, prefix, ctx=None): |
|
150 | 150 | nlprefix = b'\n' + prefix |
|
151 | 151 | if ctx is None: |
|
152 | 152 | ctx = repo[n] |
|
153 | 153 | # use ctx.node() instead ?? |
|
154 | 154 | ui.write((b"tree %s\n" % short(ctx.changeset()[0]))) |
|
155 | 155 | for p in ctx.parents(): |
|
156 | 156 | ui.write((b"parent %s\n" % p)) |
|
157 | 157 | |
|
158 | 158 | date = ctx.date() |
|
159 | 159 | description = ctx.description().replace(b"\0", b"") |
|
160 | 160 | ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1]))) |
|
161 | 161 | |
|
162 | 162 | if b'committer' in ctx.extra(): |
|
163 | 163 | ui.write((b"committer %s\n" % ctx.extra()[b'committer'])) |
|
164 | 164 | |
|
165 | 165 | ui.write((b"revision %d\n" % ctx.rev())) |
|
166 | 166 | ui.write((b"branch %s\n" % ctx.branch())) |
|
167 | 167 | if obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
168 | 168 | if ctx.obsolete(): |
|
169 | 169 | ui.writenoi18n(b"obsolete\n") |
|
170 | 170 | ui.write((b"phase %s\n\n" % ctx.phasestr())) |
|
171 | 171 | |
|
172 | 172 | if prefix != b"": |
|
173 | 173 | ui.write( |
|
174 | 174 | b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip()) |
|
175 | 175 | ) |
|
176 | 176 | else: |
|
177 | 177 | ui.write(description + b"\n") |
|
178 | 178 | if prefix: |
|
179 | 179 | ui.write(b'\0') |
|
180 | 180 | |
|
181 | 181 | |
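For reference, the record catcommit() writes above has the following shape, reconstructed from the writes themselves; the changeset, hashes, and values are placeholders, and the committer/obsolete lines only appear when set:

EXPECTED = (
    b"tree 1234567890ab\n"
    b"parent ba0987654321\n"          # one line per parent
    b"author Alice <a@example.com> 1700000000 0\n"
    b"revision 42\n"
    b"branch default\n"
    b"phase draft\n"
    b"\n"
    b"commit message body\n"
)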
|
182 | 182 | @command(b'debug-merge-base', [], _(b'REV REV')) |
|
183 | 183 | def base(ui, repo, node1, node2): |
|
184 | 184 | """output common ancestor information""" |
|
185 | 185 | node1 = repo.lookup(node1) |
|
186 | 186 | node2 = repo.lookup(node2) |
|
187 | 187 | n = repo.changelog.ancestor(node1, node2) |
|
188 | 188 | ui.write(short(n) + b"\n") |
|
189 | 189 | |
|
190 | 190 | |
|
191 | 191 | @command( |
|
192 | 192 | b'debug-cat-file', |
|
193 | 193 | [(b's', b'stdin', None, _(b'stdin'))], |
|
194 | 194 | _(b'[OPTION]... TYPE FILE'), |
|
195 | 195 | inferrepo=True, |
|
196 | 196 | ) |
|
197 | 197 | def catfile(ui, repo, type=None, r=None, **opts): |
|
198 | 198 | """cat a specific revision""" |
|
199 | 199 | # in stdin mode, every line except the commit is prefixed with two |
|
200 | 200 | # spaces. This way our caller can find the commit without magic
|
201 | 201 | # strings |
|
202 | 202 | # |
|
203 | 203 | prefix = b"" |
|
204 | 204 | if opts['stdin']: |
|
205 | 205 | line = ui.fin.readline() |
|
206 | 206 | if not line: |
|
207 | 207 | return |
|
208 | 208 | (type, r) = line.rstrip(pycompat.oslinesep).split(b' ') |
|
209 | 209 | prefix = b" " |
|
210 | 210 | else: |
|
211 | 211 | if not type or not r: |
|
212 | 212 | ui.warn(_(b"cat-file: type or revision not supplied\n")) |
|
213 | 213 | commands.help_(ui, b'cat-file') |
|
214 | 214 | |
|
215 | 215 | while r: |
|
216 | 216 | if type != b"commit": |
|
217 | 217 | ui.warn(_(b"aborting: hg cat-file only understands commits\n"))
|
218 | 218 | return 1 |
|
219 | 219 | n = repo.lookup(r) |
|
220 | 220 | catcommit(ui, repo, n, prefix) |
|
221 | 221 | if opts['stdin']: |
|
222 | 222 | line = ui.fin.readline() |
|
223 | 223 | if not line: |
|
224 | 224 | break |
|
225 | 225 | (type, r) = line.rstrip(pycompat.oslinesep).split(b' ') |
|
226 | 226 | else: |
|
227 | 227 | break |
|
228 | 228 | |
|
229 | 229 | |
|
230 | 230 | # git rev-tree is a confusing thing. You can supply a number of |
|
231 | 231 | # commit sha1s on the command line, and it walks the commit history |
|
232 | 232 | # telling you which commits are reachable from the supplied ones via |
|
233 | 233 | # a bitmask based on arg position. |
|
234 | 234 | # you can specify a commit to stop at by starting the sha1 with ^ |
|
def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False):
    # walk the changelog in chunks of 100 revisions, newest first, yielding
    # (rev, ctx) pairs; the ctx is only materialized when full is not None
    def chlogwalk():
        count = len(repo)
        i = count
        l = [0] * 100
        chunk = 100
        while True:
            if chunk > i:
                chunk = i
                i = 0
            else:
                i -= chunk

            for x in pycompat.xrange(chunk):
                if i + x >= count:
                    l[chunk - x :] = [0] * (chunk - x)
                    break
                if full is not None:
                    if (i + x) in repo:
                        l[x] = repo[i + x]
                        l[x].changeset()  # force reading
                else:
                    if (i + x) in repo:
                        l[x] = 1
            for x in pycompat.xrange(chunk - 1, -1, -1):
                if l[x] != 0:
                    yield (i + x, full is not None and l[x] or None)
            if i == 0:
                break

    # calculate and return the reachability bitmask for sha
    def is_reachable(ar, reachable, sha):
        if len(ar) == 0:
            return 1
        mask = 0
        for i in pycompat.xrange(len(ar)):
            if sha in reachable[i]:
                mask |= 1 << i

        return mask

    reachable = []
    stop_sha1 = []
    want_sha1 = []
    count = 0

    # figure out which commits they are asking for and which ones they
    # want us to stop on
    for arg in args:
        if arg.startswith(b'^'):
            s = repo.lookup(arg[1:])
            stop_sha1.append(s)
            want_sha1.append(s)
        elif arg != b'HEAD':
            want_sha1.append(repo.lookup(arg))

    # calculate the graph for the supplied commits
    for i, n in enumerate(want_sha1):
        reachable.append(set())
        visit = [n]
        reachable[i].add(n)
        while visit:
            n = visit.pop(0)
            if n in stop_sha1:
                continue
            for p in repo.changelog.parents(n):
                if p not in reachable[i]:
                    reachable[i].add(p)
                    visit.append(p)
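    # At this point each reachable[i] holds want_sha1[i] plus every ancestor
    # found by the breadth-first walk above; commits listed in stop_sha1 end
    # up in the sets but their parents are never expanded, which is what
    # bounds the walk.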
|

    # walk the repository looking for commits that are in our
    # reachability graph
    for i, ctx in chlogwalk():
        if i not in repo:
            continue
        n = repo.changelog.node(i)
        mask = is_reachable(want_sha1, reachable, n)
        if mask:
            parentstr = b""
            if parents:
                pp = repo.changelog.parents(n)
                if pp[0] != repo.nullid:
                    parentstr += b" " + short(pp[0])
                if pp[1] != repo.nullid:
                    parentstr += b" " + short(pp[1])
            if not full:
                ui.write(b"%s%s\n" % (short(n), parentstr))
            elif full == b"commit":
                ui.write(b"%s%s\n" % (short(n), parentstr))
                catcommit(ui, repo, n, b'  ', ctx)
            else:
                (p1, p2) = repo.changelog.parents(n)
                (h, h1, h2) = map(short, (n, p1, p2))
                (i1, i2) = map(repo.changelog.rev, (p1, p2))

                date = ctx.date()[0]
                ui.write(b"%s %s:%s" % (date, h, mask))
                mask = is_reachable(want_sha1, reachable, p1)
                if i1 != nullrev and mask > 0:
                    ui.write(b"%s:%s " % (h1, mask))
                mask = is_reachable(want_sha1, reachable, p2)
                if i2 != nullrev and mask > 0:
                    ui.write(b"%s:%s " % (h2, mask))
                ui.write(b"\n")
            if maxnr and count >= maxnr:
                break
            count += 1


# git rev-list tries to order things by date, and has the ability to stop
# at a given commit without walking the whole repo.  TODO add the stop
# parameter
@command(
    b'debug-rev-list',
    [
        (b'H', b'header', None, _(b'header')),
        (b't', b'topo-order', None, _(b'topo-order')),
        (b'p', b'parents', None, _(b'parents')),
        (b'n', b'max-count', 0, _(b'max-count')),
    ],
    b'[OPTION]... REV...',
)
def revlist(ui, repo, *revs, **opts):
    """print revisions"""
    if opts['header']:
        full = b"commit"
    else:
        full = None
    copy = list(revs)
    revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
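# A small usage sketch (revision names are hypothetical):
# `hg debug-rev-list tip` prints the short hashes of tip and its ancestors,
# and adding -H/--header switches full to b"commit" so each revision is
# followed by its catcommit() output.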
|

@command(
    b'view',
    [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))],
    _(b'[-l LIMIT] [REVRANGE]'),
    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
)
def view(ui, repo, *etc, **opts):
    """start interactive history viewer"""
    opts = pycompat.byteskwargs(opts)
    os.chdir(repo.root)
    optstr = b' '.join([b'--%s %s' % (k, v) for k, v in opts.items() if v])
    if repo.filtername is None:
        optstr += b' --hidden'

    cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc))
    ui.debug(b"running %s\n" % cmd)
    ui.system(cmd, blockedtag=b'hgk_view')
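# A minimal usage sketch, assuming the hgk script is on the configured
# [hgk] path: `hg view -l 20` becomes `hgk --limit 20` and starts the
# viewer on the 20 most recent changes; on an unfiltered repository
# `--hidden` is appended as well.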
|