@@ -1,1166 +1,1165 b'' | |||
|
1 | 1 | # absorb.py |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """apply working directory changes to changesets (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | The absorb extension provides a command to use annotate information to |
|
11 | 11 | amend modified chunks into the corresponding non-public changesets. |
|
12 | 12 | |
|
13 | 13 | :: |
|
14 | 14 | |
|
15 | 15 | [absorb] |
|
16 | 16 | # only check 50 recent non-public changesets at most |
|
17 | 17 | max-stack-size = 50 |
|
18 | 18 | # whether to add noise to new commits to avoid obsolescence cycle |
|
19 | 19 | add-noise = 1 |
|
20 | 20 | # make `amend --correlated` a shortcut to the main command |
|
21 | 21 | amend-flag = correlated |
|
22 | 22 | |
|
23 | 23 | [color] |
|
24 | 24 | absorb.description = yellow |
|
25 | 25 | absorb.node = blue bold |
|
26 | 26 | absorb.path = bold |
|
27 | 27 | """ |
|
28 | 28 | |
|
29 | 29 | # TODO: |
|
30 | 30 | # * Rename config items to [commands] namespace |
|
31 | 31 | # * Converge getdraftstack() with other code in core |
|
32 | 32 | # * move many attributes on fixupstate to be private |
|
33 | 33 | |
|
34 | 34 | from __future__ import absolute_import |
|
35 | 35 | |
|
36 | 36 | import collections |
|
37 | 37 | |
|
38 | 38 | from mercurial.i18n import _ |
|
39 | 39 | from mercurial.node import ( |
|
40 | 40 | hex, |
|
41 | nullid, | |
|
42 | 41 | short, |
|
43 | 42 | ) |
|
44 | 43 | from mercurial import ( |
|
45 | 44 | cmdutil, |
|
46 | 45 | commands, |
|
47 | 46 | context, |
|
48 | 47 | crecord, |
|
49 | 48 | error, |
|
50 | 49 | linelog, |
|
51 | 50 | mdiff, |
|
52 | 51 | obsolete, |
|
53 | 52 | patch, |
|
54 | 53 | phases, |
|
55 | 54 | pycompat, |
|
56 | 55 | registrar, |
|
57 | 56 | rewriteutil, |
|
58 | 57 | scmutil, |
|
59 | 58 | util, |
|
60 | 59 | ) |
|
61 | 60 | from mercurial.utils import stringutil |
|
62 | 61 | |
|
63 | 62 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
64 | 63 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
65 | 64 | # be specifying the version(s) of Mercurial they are tested with, or |
|
66 | 65 | # leave the attribute unspecified. |
|
67 | 66 | testedwith = b'ships-with-hg-core' |
|
68 | 67 | |
|
69 | 68 | cmdtable = {} |
|
70 | 69 | command = registrar.command(cmdtable) |
|
71 | 70 | |
|
72 | 71 | configtable = {} |
|
73 | 72 | configitem = registrar.configitem(configtable) |
|
74 | 73 | |
|
75 | 74 | configitem(b'absorb', b'add-noise', default=True) |
|
76 | 75 | configitem(b'absorb', b'amend-flag', default=None) |
|
77 | 76 | configitem(b'absorb', b'max-stack-size', default=50) |
|
78 | 77 | |
|
79 | 78 | colortable = { |
|
80 | 79 | b'absorb.description': b'yellow', |
|
81 | 80 | b'absorb.node': b'blue bold', |
|
82 | 81 | b'absorb.path': b'bold', |
|
83 | 82 | } |
|
84 | 83 | |
|
85 | 84 | defaultdict = collections.defaultdict |
|
86 | 85 | |
|
87 | 86 | |
|
88 | 87 | class nullui(object): |
|
89 | 88 | """blank ui object doing nothing""" |
|
90 | 89 | |
|
91 | 90 | debugflag = False |
|
92 | 91 | verbose = False |
|
93 | 92 | quiet = True |
|
94 | 93 | |
|
95 | 94 | def __getitem__(name): |
|
96 | 95 | def nullfunc(*args, **kwds): |
|
97 | 96 | return |
|
98 | 97 | |
|
99 | 98 | return nullfunc |
|
100 | 99 | |
|
101 | 100 | |
|
102 | 101 | class emptyfilecontext(object): |
|
103 | 102 | """minimal filecontext representing an empty file""" |
|
104 | 103 | |
|
105 | 104 | def __init__(self, repo): |
|
106 | 105 | self._repo = repo |
|
107 | 106 | |
|
108 | 107 | def data(self): |
|
109 | 108 | return b'' |
|
110 | 109 | |
|
111 | 110 | def node(self): |
|
112 | return nullid | |
|
111 | return self._repo.nullid | |
|
113 | 112 | |
|
114 | 113 | |
|
115 | 114 | def uniq(lst): |
|
116 | 115 | """list -> list. remove duplicated items without changing the order""" |
|
117 | 116 | seen = set() |
|
118 | 117 | result = [] |
|
119 | 118 | for x in lst: |
|
120 | 119 | if x not in seen: |
|
121 | 120 | seen.add(x) |
|
122 | 121 | result.append(x) |
|
123 | 122 | return result |
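A quick illustration of the order-preserving dedup (a hypothetical doctest session, not part of the diff):

```python
>>> uniq([3, 1, 3, 2, 1])
[3, 1, 2]
```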
|
124 | 123 | |
|
125 | 124 | |
|
126 | 125 | def getdraftstack(headctx, limit=None): |
|
127 | 126 | """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets. |
|
128 | 127 | |
|
129 | 128 | changesets are sorted in topo order, oldest first. |
|
130 | 129 | return at most limit items, if limit is a positive number. |
|
131 | 130 | |
|
132 | 131 | merges are considered non-draft as well, i.e. every commit |

133 | 132 | returned has exactly 1 parent. |
|
134 | 133 | """ |
|
135 | 134 | ctx = headctx |
|
136 | 135 | result = [] |
|
137 | 136 | while ctx.phase() != phases.public: |
|
138 | 137 | if limit and len(result) >= limit: |
|
139 | 138 | break |
|
140 | 139 | parents = ctx.parents() |
|
141 | 140 | if len(parents) != 1: |
|
142 | 141 | break |
|
143 | 142 | result.append(ctx) |
|
144 | 143 | ctx = parents[0] |
|
145 | 144 | result.reverse() |
|
146 | 145 | return result |
|
147 | 146 | |
|
148 | 147 | |
|
149 | 148 | def getfilestack(stack, path, seenfctxs=None): |
|
150 | 149 | """([ctx], str, set) -> [fctx], {ctx: fctx} |
|
151 | 150 | |
|
152 | 151 | stack is a list of contexts, from old to new. usually they are what |
|
153 | 152 | "getdraftstack" returns. |
|
154 | 153 | |
|
155 | 154 | follows renames, but not copies. |
|
156 | 155 | |
|
157 | 156 | seenfctxs is a set of filecontexts that will be considered "immutable". |
|
158 | 157 | they are usually what this function returned in earlier calls, useful |
|
159 | 158 | to avoid issues where a file was "moved" to multiple places and was then |
|
160 | 159 | modified differently, like: "a" was copied to "b", "a" was also copied to |
|
161 | 160 | "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a" |
|
162 | 161 | and we enforce only one of them to be able to affect "a"'s content. |
|
163 | 162 | |
|
164 | 163 | return an empty list and an empty dict, if the specified path does not |
|
165 | 164 | exist in stack[-1] (the top of the stack). |
|
166 | 165 | |
|
167 | 166 | otherwise, return a list of de-duplicated filecontexts, and the map to |
|
168 | 167 | convert ctx in the stack to fctx, for possible mutable fctxs. the first item |
|
169 | 168 | of the list would be outside the stack and should be considered immutable. |
|
170 | 169 | the remaining items are within the stack. |
|
171 | 170 | |
|
172 | 171 | for example, given the following changelog and corresponding filelog |
|
173 | 172 | revisions: |
|
174 | 173 | |
|
175 | 174 | changelog: 3----4----5----6----7 |
|
176 | 175 | filelog: x 0----1----1----2 (x: no such file yet) |
|
177 | 176 | |
|
178 | 177 | - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2}) |
|
179 | 178 | - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a |
|
180 | 179 | dummy empty filecontext. |
|
181 | 180 | - if stack = [2], returns ([], {}) |
|
182 | 181 | - if stack = [7], returns ([1, 2], {7: 2}) |
|
183 | 182 | - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be |
|
184 | 183 | removed, since 1 is immutable. |
|
185 | 184 | """ |
|
186 | 185 | if seenfctxs is None: |
|
187 | 186 | seenfctxs = set() |
|
188 | 187 | assert stack |
|
189 | 188 | |
|
190 | 189 | if path not in stack[-1]: |
|
191 | 190 | return [], {} |
|
192 | 191 | |
|
193 | 192 | fctxs = [] |
|
194 | 193 | fctxmap = {} |
|
195 | 194 | |
|
196 | 195 | pctx = stack[0].p1() # the public (immutable) ctx we stop at |
|
197 | 196 | for ctx in reversed(stack): |
|
198 | 197 | if path not in ctx: # the file is added in the next commit |
|
199 | 198 | pctx = ctx |
|
200 | 199 | break |
|
201 | 200 | fctx = ctx[path] |
|
202 | 201 | fctxs.append(fctx) |
|
203 | 202 | if fctx in seenfctxs: # treat fctx as the immutable one |
|
204 | 203 | pctx = None # do not add another immutable fctx |
|
205 | 204 | break |
|
206 | 205 | fctxmap[ctx] = fctx # only for mutable fctxs |
|
207 | 206 | copy = fctx.copysource() |
|
208 | 207 | if copy: |
|
209 | 208 | path = copy # follow rename |
|
210 | 209 | if path in ctx: # but do not follow copy |
|
211 | 210 | pctx = ctx.p1() |
|
212 | 211 | break |
|
213 | 212 | |
|
214 | 213 | if pctx is not None: # need an extra immutable fctx |
|
215 | 214 | if path in pctx: |
|
216 | 215 | fctxs.append(pctx[path]) |
|
217 | 216 | else: |
|
218 | 217 | fctxs.append(emptyfilecontext(pctx.repo())) |
|
219 | 218 | |
|
220 | 219 | fctxs.reverse() |
|
221 | 220 | # note: we rely on a property of hg: filerev is not reused for linear |
|
222 | 221 | # history. i.e. it's impossible to have: |
|
223 | 222 | # changelog: 4----5----6 (linear, no merges) |
|
224 | 223 | # filelog: 1----2----1 |
|
225 | 224 | # ^ reuse filerev (impossible) |
|
226 | 225 | # because parents are part of the hash. if that's not true, we need to |
|
227 | 226 | # remove uniq and find a different way to identify fctxs. |
|
228 | 227 | return uniq(fctxs), fctxmap |
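To make the docstring's diagram concrete, here is a sketch of the two return values for the stack = [5, 6, 7] case, with plain ints standing in for the changelog and filelog revisions (hypothetical values, not real context objects):

```python
# getfilestack([5, 6, 7], path), per the diagram above:
fctxs = [0, 1, 2]             # de-duplicated filelog revs, oldest first;
                              # fctxs[0] lies outside the stack -> immutable
fctxmap = {5: 1, 6: 1, 7: 2}  # changelog rev -> filelog rev (mutable only)
```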
|
229 | 228 | |
|
230 | 229 | |
|
231 | 230 | class overlaystore(patch.filestore): |
|
232 | 231 | """read-only, hybrid store based on a dict and ctx. |
|
233 | 232 | memworkingcopy: {path: content}, overrides file contents. |
|
234 | 233 | """ |
|
235 | 234 | |
|
236 | 235 | def __init__(self, basectx, memworkingcopy): |
|
237 | 236 | self.basectx = basectx |
|
238 | 237 | self.memworkingcopy = memworkingcopy |
|
239 | 238 | |
|
240 | 239 | def getfile(self, path): |
|
241 | 240 | """comply with mercurial.patch.filestore.getfile""" |
|
242 | 241 | if path not in self.basectx: |
|
243 | 242 | return None, None, None |
|
244 | 243 | fctx = self.basectx[path] |
|
245 | 244 | if path in self.memworkingcopy: |
|
246 | 245 | content = self.memworkingcopy[path] |
|
247 | 246 | else: |
|
248 | 247 | content = fctx.data() |
|
249 | 248 | mode = (fctx.islink(), fctx.isexec()) |
|
250 | 249 | copy = fctx.copysource() |
|
251 | 250 | return content, mode, copy |
|
252 | 251 | |
|
253 | 252 | |
|
254 | 253 | def overlaycontext(memworkingcopy, ctx, parents=None, extra=None, desc=None): |
|
255 | 254 | """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx |
|
256 | 255 | memworkingcopy overrides file contents. |
|
257 | 256 | """ |
|
258 | 257 | # parents must contain 2 items: (node1, node2) |
|
259 | 258 | if parents is None: |
|
260 | 259 | parents = ctx.repo().changelog.parents(ctx.node()) |
|
261 | 260 | if extra is None: |
|
262 | 261 | extra = ctx.extra() |
|
263 | 262 | if desc is None: |
|
264 | 263 | desc = ctx.description() |
|
265 | 264 | date = ctx.date() |
|
266 | 265 | user = ctx.user() |
|
267 | 266 | files = set(ctx.files()).union(memworkingcopy) |
|
268 | 267 | store = overlaystore(ctx, memworkingcopy) |
|
269 | 268 | return context.memctx( |
|
270 | 269 | repo=ctx.repo(), |
|
271 | 270 | parents=parents, |
|
272 | 271 | text=desc, |
|
273 | 272 | files=files, |
|
274 | 273 | filectxfn=store, |
|
275 | 274 | user=user, |
|
276 | 275 | date=date, |
|
277 | 276 | branch=None, |
|
278 | 277 | extra=extra, |
|
279 | 278 | ) |
|
280 | 279 | |
|
281 | 280 | |
|
282 | 281 | class filefixupstate(object): |
|
283 | 282 | """state needed to apply fixups to a single file |
|
284 | 283 | |
|
285 | 284 | internally, it keeps file contents of several revisions and a linelog. |
|
286 | 285 | |
|
287 | 286 | the linelog uses odd revision numbers for original contents (fctxs passed |
|
288 | 287 | to __init__), and even revision numbers for fixups, like: |
|
289 | 288 | |
|
290 | 289 | linelog rev 1: self.fctxs[0] (from an immutable "public" changeset) |
|
291 | 290 | linelog rev 2: fixups made to self.fctxs[0] |
|
292 | 291 | linelog rev 3: self.fctxs[1] (a child of fctxs[0]) |
|
293 | 292 | linelog rev 4: fixups made to self.fctxs[1] |
|
294 | 293 | ... |
|
295 | 294 | |
|
296 | 295 | a typical use is like: |
|
297 | 296 | |
|
298 | 297 | 1. call diffwith, to calculate self.fixups |
|
299 | 298 | 2. (optionally), present self.fixups to the user, or change it |
|
300 | 299 | 3. call apply, to apply changes |
|
301 | 300 | 4. read results from "finalcontents", or call getfinalcontent |
|
302 | 301 | """ |
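The odd/even numbering above is what _buildlinelog and _checkoutlinelog below rely on; as a sketch, the mapping from an fctx index i to its two linelog revisions is (helper names invented for illustration):

```python
def llrev_original(i):
    # odd revision: the unmodified content of self.fctxs[i]
    return i * 2 + 1

def llrev_fixup(i):
    # even revision: the fixups amended into self.fctxs[i]
    return i * 2 + 2
```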
|
303 | 302 | |
|
304 | 303 | def __init__(self, fctxs, path, ui=None, opts=None): |
|
305 | 304 | """([fctx], str, ui or None, {}?) -> None |
|
306 | 305 | |
|
307 | 306 | fctxs should be linear, and sorted by topo order - oldest first. |
|
308 | 307 | fctxs[0] will be considered as "immutable" and will not be changed. |
|
309 | 308 | """ |
|
310 | 309 | self.fctxs = fctxs |
|
311 | 310 | self.path = path |
|
312 | 311 | self.ui = ui or nullui() |
|
313 | 312 | self.opts = opts or {} |
|
314 | 313 | |
|
315 | 314 | # following fields are built from fctxs. they exist for perf reason |
|
316 | 315 | self.contents = [f.data() for f in fctxs] |
|
317 | 316 | self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents) |
|
318 | 317 | self.linelog = self._buildlinelog() |
|
319 | 318 | if self.ui.debugflag: |
|
320 | 319 | assert self._checkoutlinelog() == self.contents |
|
321 | 320 | |
|
322 | 321 | # following fields will be filled later |
|
323 | 322 | self.chunkstats = [0, 0] # [adopted, total : int] |
|
324 | 323 | self.targetlines = [] # [str] |
|
325 | 324 | self.fixups = [] # [(linelog rev, a1, a2, b1, b2)] |
|
326 | 325 | self.finalcontents = [] # [str] |
|
327 | 326 | self.ctxaffected = set() |
|
328 | 327 | |
|
329 | 328 | def diffwith(self, targetfctx, fm=None): |
|
330 | 329 | """calculate fixups needed by examining the differences between |
|
331 | 330 | self.fctxs[-1] and targetfctx, chunk by chunk. |
|
332 | 331 | |
|
333 | 332 | targetfctx is the target state we move towards. we may or may not be |
|
334 | 333 | able to get there because not all modified chunks can be amended into |
|
335 | 334 | a non-public fctx unambiguously. |
|
336 | 335 | |
|
337 | 336 | call this only once, before apply(). |
|
338 | 337 | |
|
339 | 338 | update self.fixups, self.chunkstats, and self.targetlines. |
|
340 | 339 | """ |
|
341 | 340 | a = self.contents[-1] |
|
342 | 341 | alines = self.contentlines[-1] |
|
343 | 342 | b = targetfctx.data() |
|
344 | 343 | blines = mdiff.splitnewlines(b) |
|
345 | 344 | self.targetlines = blines |
|
346 | 345 | |
|
347 | 346 | self.linelog.annotate(self.linelog.maxrev) |
|
348 | 347 | annotated = self.linelog.annotateresult # [(linelog rev, linenum)] |
|
349 | 348 | assert len(annotated) == len(alines) |
|
350 | 349 | # add a dummy end line to make insertion at the end easier |
|
351 | 350 | if annotated: |
|
352 | 351 | dummyendline = (annotated[-1][0], annotated[-1][1] + 1) |
|
353 | 352 | annotated.append(dummyendline) |
|
354 | 353 | |
|
355 | 354 | # analyse diff blocks |
|
356 | 355 | for chunk in self._alldiffchunks(a, b, alines, blines): |
|
357 | 356 | newfixups = self._analysediffchunk(chunk, annotated) |
|
358 | 357 | self.chunkstats[0] += bool(newfixups) # 1 or 0 |
|
359 | 358 | self.chunkstats[1] += 1 |
|
360 | 359 | self.fixups += newfixups |
|
361 | 360 | if fm is not None: |
|
362 | 361 | self._showchanges(fm, alines, blines, chunk, newfixups) |
|
363 | 362 | |
|
364 | 363 | def apply(self): |
|
365 | 364 | """apply self.fixups. update self.linelog, self.finalcontents. |
|
366 | 365 | |
|
367 | 366 | call this only once, before getfinalcontent(), after diffwith(). |
|
368 | 367 | """ |
|
369 | 368 | # the following is unnecessary, as it's done by "diffwith": |
|
370 | 369 | # self.linelog.annotate(self.linelog.maxrev) |
|
371 | 370 | for rev, a1, a2, b1, b2 in reversed(self.fixups): |
|
372 | 371 | blines = self.targetlines[b1:b2] |
|
373 | 372 | if self.ui.debugflag: |
|
374 | 373 | idx = (max(rev - 1, 0)) // 2 |
|
375 | 374 | self.ui.write( |
|
376 | 375 | _(b'%s: chunk %d:%d -> %d lines\n') |
|
377 | 376 | % (short(self.fctxs[idx].node()), a1, a2, len(blines)) |
|
378 | 377 | ) |
|
379 | 378 | self.linelog.replacelines(rev, a1, a2, b1, b2) |
|
380 | 379 | if self.opts.get(b'edit_lines', False): |
|
381 | 380 | self.finalcontents = self._checkoutlinelogwithedits() |
|
382 | 381 | else: |
|
383 | 382 | self.finalcontents = self._checkoutlinelog() |
|
384 | 383 | |
|
385 | 384 | def getfinalcontent(self, fctx): |
|
386 | 385 | """(fctx) -> str. get modified file content for a given filecontext""" |
|
387 | 386 | idx = self.fctxs.index(fctx) |
|
388 | 387 | return self.finalcontents[idx] |
|
389 | 388 | |
|
390 | 389 | def _analysediffchunk(self, chunk, annotated): |
|
391 | 390 | """analyse a diff chunk and return new fixups found |
|
392 | 391 | |
|
393 | 392 | return [] if no lines from the chunk can be safely applied. |
|
394 | 393 | |
|
395 | 394 | the chunk (or lines) cannot be safely applied, if, for example: |
|
396 | 395 | - the modified (deleted) lines belong to a public changeset |
|
397 | 396 | (self.fctxs[0]) |
|
398 | 397 | - the chunk is a pure insertion and the adjacent lines (at most 2 |
|
399 | 398 | lines) belong to different non-public changesets, or do not belong |
|
400 | 399 | to any non-public changesets. |
|
401 | 400 | - the chunk is modifying lines from different changesets. |
|
402 | 401 | in this case, if the number of lines deleted equals the number |
|
403 | 402 | of lines added, assume it's a simple 1:1 map (could be wrong). |
|
404 | 403 | otherwise, give up. |
|
405 | 404 | - the chunk is modifying lines from a single non-public changeset, |
|
406 | 405 | but other revisions touch the area as well. i.e. the lines are |
|
407 | 406 | not continuous as seen from the linelog. |
|
408 | 407 | """ |
|
409 | 408 | a1, a2, b1, b2 = chunk |
|
410 | 409 | # find involved indexes from annotate result |
|
411 | 410 | involved = annotated[a1:a2] |
|
412 | 411 | if not involved and annotated: # a1 == a2 and a is not empty |
|
413 | 412 | # pure insertion, check nearby lines. ignore lines belonging |
|
414 | 413 | # to the public (first) changeset (i.e. annotated[i][0] == 1) |
|
415 | 414 | nearbylinenums = {a2, max(0, a1 - 1)} |
|
416 | 415 | involved = [ |
|
417 | 416 | annotated[i] for i in nearbylinenums if annotated[i][0] != 1 |
|
418 | 417 | ] |
|
419 | 418 | involvedrevs = list({r for r, l in involved}) |
|
420 | 419 | newfixups = [] |
|
421 | 420 | if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True): |
|
422 | 421 | # chunk belongs to a single revision |
|
423 | 422 | rev = involvedrevs[0] |
|
424 | 423 | if rev > 1: |
|
425 | 424 | fixuprev = rev + 1 |
|
426 | 425 | newfixups.append((fixuprev, a1, a2, b1, b2)) |
|
427 | 426 | elif a2 - a1 == b2 - b1 or b1 == b2: |
|
428 | 427 | # 1:1 line mapping, or chunk was deleted |
|
429 | 428 | for i in pycompat.xrange(a1, a2): |
|
430 | 429 | rev, linenum = annotated[i] |
|
431 | 430 | if rev > 1: |
|
432 | 431 | if b1 == b2: # deletion, simply remove that single line |
|
433 | 432 | nb1 = nb2 = 0 |
|
434 | 433 | else: # 1:1 line mapping, change the corresponding rev |
|
435 | 434 | nb1 = b1 + i - a1 |
|
436 | 435 | nb2 = nb1 + 1 |
|
437 | 436 | fixuprev = rev + 1 |
|
438 | 437 | newfixups.append((fixuprev, i, i + 1, nb1, nb2)) |
|
439 | 438 | return self._optimizefixups(newfixups) |
|
440 | 439 | |
|
441 | 440 | @staticmethod |
|
442 | 441 | def _alldiffchunks(a, b, alines, blines): |
|
443 | 442 | """like mdiff.allblocks, but only care about differences""" |
|
444 | 443 | blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines) |
|
445 | 444 | for chunk, btype in blocks: |
|
446 | 445 | if btype != b'!': |
|
447 | 446 | continue |
|
448 | 447 | yield chunk |
|
449 | 448 | |
|
450 | 449 | def _buildlinelog(self): |
|
451 | 450 | """calculate the initial linelog based on self.content{,line}s. |
|
452 | 451 | this is similar to running a partial "annotate". |
|
453 | 452 | """ |
|
454 | 453 | llog = linelog.linelog() |
|
455 | 454 | a, alines = b'', [] |
|
456 | 455 | for i in pycompat.xrange(len(self.contents)): |
|
457 | 456 | b, blines = self.contents[i], self.contentlines[i] |
|
458 | 457 | llrev = i * 2 + 1 |
|
459 | 458 | chunks = self._alldiffchunks(a, b, alines, blines) |
|
460 | 459 | for a1, a2, b1, b2 in reversed(list(chunks)): |
|
461 | 460 | llog.replacelines(llrev, a1, a2, b1, b2) |
|
462 | 461 | a, alines = b, blines |
|
463 | 462 | return llog |
|
464 | 463 | |
|
465 | 464 | def _checkoutlinelog(self): |
|
466 | 465 | """() -> [str]. check out file contents from linelog""" |
|
467 | 466 | contents = [] |
|
468 | 467 | for i in pycompat.xrange(len(self.contents)): |
|
469 | 468 | rev = (i + 1) * 2 |
|
470 | 469 | self.linelog.annotate(rev) |
|
471 | 470 | content = b''.join(map(self._getline, self.linelog.annotateresult)) |
|
472 | 471 | contents.append(content) |
|
473 | 472 | return contents |
|
474 | 473 | |
|
475 | 474 | def _checkoutlinelogwithedits(self): |
|
476 | 475 | """() -> [str]. prompt all lines for edit""" |
|
477 | 476 | alllines = self.linelog.getalllines() |
|
478 | 477 | # header |
|
479 | 478 | editortext = ( |
|
480 | 479 | _( |
|
481 | 480 | b'HG: editing %s\nHG: "y" means the line to the right ' |
|
482 | 481 | b'exists in the changeset to the top\nHG:\n' |
|
483 | 482 | ) |
|
484 | 483 | % self.fctxs[-1].path() |
|
485 | 484 | ) |
|
486 | 485 | # [(idx, fctx)]. hide the dummy emptyfilecontext |
|
487 | 486 | visiblefctxs = [ |
|
488 | 487 | (i, f) |
|
489 | 488 | for i, f in enumerate(self.fctxs) |
|
490 | 489 | if not isinstance(f, emptyfilecontext) |
|
491 | 490 | ] |
|
492 | 491 | for i, (j, f) in enumerate(visiblefctxs): |
|
493 | 492 | editortext += _(b'HG: %s/%s %s %s\n') % ( |
|
494 | 493 | b'|' * i, |
|
495 | 494 | b'-' * (len(visiblefctxs) - i + 1), |
|
496 | 495 | short(f.node()), |
|
497 | 496 | f.description().split(b'\n', 1)[0], |
|
498 | 497 | ) |
|
499 | 498 | editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs)) |
|
500 | 499 | # figure out the lifetime of a line, this is relatively inefficient, |
|
501 | 500 | # but probably fine |
|
502 | 501 | lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}} |
|
503 | 502 | for i, f in visiblefctxs: |
|
504 | 503 | self.linelog.annotate((i + 1) * 2) |
|
505 | 504 | for l in self.linelog.annotateresult: |
|
506 | 505 | lineset[l].add(i) |
|
507 | 506 | # append lines |
|
508 | 507 | for l in alllines: |
|
509 | 508 | editortext += b' %s : %s' % ( |
|
510 | 509 | b''.join( |
|
511 | 510 | [ |
|
512 | 511 | (b'y' if i in lineset[l] else b' ') |
|
513 | 512 | for i, _f in visiblefctxs |
|
514 | 513 | ] |
|
515 | 514 | ), |
|
516 | 515 | self._getline(l), |
|
517 | 516 | ) |
|
518 | 517 | # run editor |
|
519 | 518 | editedtext = self.ui.edit(editortext, b'', action=b'absorb') |
|
520 | 519 | if not editedtext: |
|
521 | 520 | raise error.InputError(_(b'empty editor text')) |
|
522 | 521 | # parse edited result |
|
523 | 522 | contents = [b''] * len(self.fctxs) |
|
524 | 523 | leftpadpos = 4 |
|
525 | 524 | colonpos = leftpadpos + len(visiblefctxs) + 1 |
|
526 | 525 | for l in mdiff.splitnewlines(editedtext): |
|
527 | 526 | if l.startswith(b'HG:'): |
|
528 | 527 | continue |
|
529 | 528 | if l[colonpos - 1 : colonpos + 2] != b' : ': |
|
530 | 529 | raise error.InputError(_(b'malformed line: %s') % l) |
|
531 | 530 | linecontent = l[colonpos + 2 :] |
|
532 | 531 | for i, ch in enumerate( |
|
533 | 532 | pycompat.bytestr(l[leftpadpos : colonpos - 1]) |
|
534 | 533 | ): |
|
535 | 534 | if ch == b'y': |
|
536 | 535 | contents[visiblefctxs[i][0]] += linecontent |
|
537 | 536 | # chunkstats is hard to calculate if anything changes, therefore |
|
538 | 537 | # set them to just a simple value (1, 1). |
|
539 | 538 | if editedtext != editortext: |
|
540 | 539 | self.chunkstats = [1, 1] |
|
541 | 540 | return contents |
|
542 | 541 | |
|
543 | 542 | def _getline(self, lineinfo): |
|
544 | 543 | """((rev, linenum)) -> str. convert rev+line number to line content""" |
|
545 | 544 | rev, linenum = lineinfo |
|
546 | 545 | if rev & 1: # odd: original line taken from fctxs |
|
547 | 546 | return self.contentlines[rev // 2][linenum] |
|
548 | 547 | else: # even: fixup line from targetfctx |
|
549 | 548 | return self.targetlines[linenum] |
|
550 | 549 | |
|
551 | 550 | def _iscontinuous(self, a1, a2, closedinterval=False): |
|
552 | 551 | """(a1, a2 : int) -> bool |
|
553 | 552 | |
|
554 | 553 | check if these lines are continuous. i.e. no other insertions or |
|
555 | 554 | deletions (from other revisions) among these lines. |
|
556 | 555 | |
|
557 | 556 | closedinterval decides whether a2 should be included or not. i.e. is |
|
558 | 557 | it [a1, a2), or [a1, a2] ? |
|
559 | 558 | """ |
|
560 | 559 | if a1 >= a2: |
|
561 | 560 | return True |
|
562 | 561 | llog = self.linelog |
|
563 | 562 | offset1 = llog.getoffset(a1) |
|
564 | 563 | offset2 = llog.getoffset(a2) + int(closedinterval) |
|
565 | 564 | linesinbetween = llog.getalllines(offset1, offset2) |
|
566 | 565 | return len(linesinbetween) == a2 - a1 + int(closedinterval) |
|
567 | 566 | |
|
568 | 567 | def _optimizefixups(self, fixups): |
|
569 | 568 | """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)]. |
|
570 | 569 | merge adjacent fixups to make them less fragmented. |
|
571 | 570 | """ |
|
572 | 571 | result = [] |
|
573 | 572 | pcurrentchunk = [[-1, -1, -1, -1, -1]] |
|
574 | 573 | |
|
575 | 574 | def pushchunk(): |
|
576 | 575 | if pcurrentchunk[0][0] != -1: |
|
577 | 576 | result.append(tuple(pcurrentchunk[0])) |
|
578 | 577 | |
|
579 | 578 | for i, chunk in enumerate(fixups): |
|
580 | 579 | rev, a1, a2, b1, b2 = chunk |
|
581 | 580 | lastrev = pcurrentchunk[0][0] |
|
582 | 581 | lasta2 = pcurrentchunk[0][2] |
|
583 | 582 | lastb2 = pcurrentchunk[0][4] |
|
584 | 583 | if ( |
|
585 | 584 | a1 == lasta2 |
|
586 | 585 | and b1 == lastb2 |
|
587 | 586 | and rev == lastrev |
|
588 | 587 | and self._iscontinuous(max(a1 - 1, 0), a1) |
|
589 | 588 | ): |
|
590 | 589 | # merge into currentchunk |
|
591 | 590 | pcurrentchunk[0][2] = a2 |
|
592 | 591 | pcurrentchunk[0][4] = b2 |
|
593 | 592 | else: |
|
594 | 593 | pushchunk() |
|
595 | 594 | pcurrentchunk[0] = list(chunk) |
|
596 | 595 | pushchunk() |
|
597 | 596 | return result |
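A toy run of the merge rule (made-up fixup tuples; the linelog continuity check is assumed to pass):

```python
fixups = [(4, 0, 2, 0, 2), (4, 2, 3, 2, 5), (6, 3, 4, 5, 6)]
# the first two merge: same rev, a1 == previous a2, b1 == previous b2
optimized = [(4, 0, 3, 0, 5), (6, 3, 4, 5, 6)]
```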
|
598 | 597 | |
|
599 | 598 | def _showchanges(self, fm, alines, blines, chunk, fixups): |
|
600 | 599 | def trim(line): |
|
601 | 600 | if line.endswith(b'\n'): |
|
602 | 601 | line = line[:-1] |
|
603 | 602 | return line |
|
604 | 603 | |
|
605 | 604 | # this is not optimized for perf but _showchanges only gets executed |
|
606 | 605 | # with an extra command-line flag. |
|
607 | 606 | a1, a2, b1, b2 = chunk |
|
608 | 607 | aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1) |
|
609 | 608 | for idx, fa1, fa2, fb1, fb2 in fixups: |
|
610 | 609 | for i in pycompat.xrange(fa1, fa2): |
|
611 | 610 | aidxs[i - a1] = (max(idx, 1) - 1) // 2 |
|
612 | 611 | for i in pycompat.xrange(fb1, fb2): |
|
613 | 612 | bidxs[i - b1] = (max(idx, 1) - 1) // 2 |
|
614 | 613 | |
|
615 | 614 | fm.startitem() |
|
616 | 615 | fm.write( |
|
617 | 616 | b'hunk', |
|
618 | 617 | b' %s\n', |
|
619 | 618 | b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1), |
|
620 | 619 | label=b'diff.hunk', |
|
621 | 620 | ) |
|
622 | 621 | fm.data(path=self.path, linetype=b'hunk') |
|
623 | 622 | |
|
624 | 623 | def writeline(idx, diffchar, line, linetype, linelabel): |
|
625 | 624 | fm.startitem() |
|
626 | 625 | node = b'' |
|
627 | 626 | if idx: |
|
628 | 627 | ctx = self.fctxs[idx] |
|
629 | 628 | fm.context(fctx=ctx) |
|
630 | 629 | node = ctx.hex() |
|
631 | 630 | self.ctxaffected.add(ctx.changectx()) |
|
632 | 631 | fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node') |
|
633 | 632 | fm.write( |
|
634 | 633 | b'diffchar ' + linetype, |
|
635 | 634 | b'%s%s\n', |
|
636 | 635 | diffchar, |
|
637 | 636 | line, |
|
638 | 637 | label=linelabel, |
|
639 | 638 | ) |
|
640 | 639 | fm.data(path=self.path, linetype=linetype) |
|
641 | 640 | |
|
642 | 641 | for i in pycompat.xrange(a1, a2): |
|
643 | 642 | writeline( |
|
644 | 643 | aidxs[i - a1], |
|
645 | 644 | b'-', |
|
646 | 645 | trim(alines[i]), |
|
647 | 646 | b'deleted', |
|
648 | 647 | b'diff.deleted', |
|
649 | 648 | ) |
|
650 | 649 | for i in pycompat.xrange(b1, b2): |
|
651 | 650 | writeline( |
|
652 | 651 | bidxs[i - b1], |
|
653 | 652 | b'+', |
|
654 | 653 | trim(blines[i]), |
|
655 | 654 | b'inserted', |
|
656 | 655 | b'diff.inserted', |
|
657 | 656 | ) |
|
658 | 657 | |
|
659 | 658 | |
|
660 | 659 | class fixupstate(object): |
|
661 | 660 | """state needed to run absorb |
|
662 | 661 | |
|
663 | 662 | internally, it keeps paths and filefixupstates. |
|
664 | 663 | |
|
665 | 664 | a typical use is like filefixupstates: |
|
666 | 665 | |
|
667 | 666 | 1. call diffwith, to calculate fixups |
|
668 | 667 | 2. (optionally), present fixups to the user, or edit fixups |
|
669 | 668 | 3. call apply, to apply changes to memory |
|
670 | 669 | 4. call commit, to commit changes to hg database |
|
671 | 670 | """ |
|
672 | 671 | |
|
673 | 672 | def __init__(self, stack, ui=None, opts=None): |
|
674 | 673 | """([ctx], ui or None, {}?) -> None |
|
675 | 674 | |
|
676 | 675 | stack: should be linear, and sorted by topo order - oldest first. |
|
677 | 676 | all commits in stack are considered mutable. |
|
678 | 677 | """ |
|
679 | 678 | assert stack |
|
680 | 679 | self.ui = ui or nullui() |
|
681 | 680 | self.opts = opts or {} |
|
682 | 681 | self.stack = stack |
|
683 | 682 | self.repo = stack[-1].repo().unfiltered() |
|
684 | 683 | |
|
685 | 684 | # following fields will be filled later |
|
686 | 685 | self.paths = [] # [str] |
|
687 | 686 | self.status = None # ctx.status output |
|
688 | 687 | self.fctxmap = {} # {path: {ctx: fctx}} |
|
689 | 688 | self.fixupmap = {} # {path: filefixupstate} |
|
690 | 689 | self.replacemap = {} # {oldnode: newnode or None} |
|
691 | 690 | self.finalnode = None # head after all fixups |
|
692 | 691 | self.ctxaffected = set() # ctx that will be absorbed into |
|
693 | 692 | |
|
694 | 693 | def diffwith(self, targetctx, match=None, fm=None): |
|
695 | 694 | """diff and prepare fixups. update self.fixupmap, self.paths""" |
|
696 | 695 | # only care about modified files |
|
697 | 696 | self.status = self.stack[-1].status(targetctx, match) |
|
698 | 697 | self.paths = [] |
|
699 | 698 | # but if --edit-lines is used, the user may want to edit files |
|
700 | 699 | # even if they are not modified |
|
701 | 700 | editopt = self.opts.get(b'edit_lines') |
|
702 | 701 | if not self.status.modified and editopt and match: |
|
703 | 702 | interestingpaths = match.files() |
|
704 | 703 | else: |
|
705 | 704 | interestingpaths = self.status.modified |
|
706 | 705 | # prepare the filefixupstate |
|
707 | 706 | seenfctxs = set() |
|
708 | 707 | # sorting is necessary to eliminate ambiguity for the "double move" |
|
709 | 708 | # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A". |
|
710 | 709 | for path in sorted(interestingpaths): |
|
711 | 710 | self.ui.debug(b'calculating fixups for %s\n' % path) |
|
712 | 711 | targetfctx = targetctx[path] |
|
713 | 712 | fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs) |
|
714 | 713 | # ignore symbolic links, binary files, or unchanged files |
|
715 | 714 | if any( |
|
716 | 715 | f.islink() or stringutil.binary(f.data()) |
|
717 | 716 | for f in [targetfctx] + fctxs |
|
718 | 717 | if not isinstance(f, emptyfilecontext) |
|
719 | 718 | ): |
|
720 | 719 | continue |
|
721 | 720 | if targetfctx.data() == fctxs[-1].data() and not editopt: |
|
722 | 721 | continue |
|
723 | 722 | seenfctxs.update(fctxs[1:]) |
|
724 | 723 | self.fctxmap[path] = ctx2fctx |
|
725 | 724 | fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts) |
|
726 | 725 | if fm is not None: |
|
727 | 726 | fm.startitem() |
|
728 | 727 | fm.plain(b'showing changes for ') |
|
729 | 728 | fm.write(b'path', b'%s\n', path, label=b'absorb.path') |
|
730 | 729 | fm.data(linetype=b'path') |
|
731 | 730 | fstate.diffwith(targetfctx, fm) |
|
732 | 731 | self.fixupmap[path] = fstate |
|
733 | 732 | self.paths.append(path) |
|
734 | 733 | self.ctxaffected.update(fstate.ctxaffected) |
|
735 | 734 | |
|
736 | 735 | def apply(self): |
|
737 | 736 | """apply fixups to individual filefixupstates""" |
|
738 | 737 | for path, state in pycompat.iteritems(self.fixupmap): |
|
739 | 738 | if self.ui.debugflag: |
|
740 | 739 | self.ui.write(_(b'applying fixups to %s\n') % path) |
|
741 | 740 | state.apply() |
|
742 | 741 | |
|
743 | 742 | @property |
|
744 | 743 | def chunkstats(self): |
|
745 | 744 | """-> {path: chunkstats}. collect chunkstats from filefixupstates""" |
|
746 | 745 | return { |
|
747 | 746 | path: state.chunkstats |
|
748 | 747 | for path, state in pycompat.iteritems(self.fixupmap) |
|
749 | 748 | } |
|
750 | 749 | |
|
751 | 750 | def commit(self): |
|
752 | 751 | """commit changes. update self.finalnode, self.replacemap""" |
|
753 | 752 | with self.repo.transaction(b'absorb') as tr: |
|
754 | 753 | self._commitstack() |
|
755 | 754 | self._movebookmarks(tr) |
|
756 | 755 | if self.repo[b'.'].node() in self.replacemap: |
|
757 | 756 | self._moveworkingdirectoryparent() |
|
758 | 757 | self._cleanupoldcommits() |
|
759 | 758 | return self.finalnode |
|
760 | 759 | |
|
761 | 760 | def printchunkstats(self): |
|
762 | 761 | """print things like '1 of 2 chunk(s) applied'""" |
|
763 | 762 | ui = self.ui |
|
764 | 763 | chunkstats = self.chunkstats |
|
765 | 764 | if ui.verbose: |
|
766 | 765 | # chunkstats for each file |
|
767 | 766 | for path, stat in pycompat.iteritems(chunkstats): |
|
768 | 767 | if stat[0]: |
|
769 | 768 | ui.write( |
|
770 | 769 | _(b'%s: %d of %d chunk(s) applied\n') |
|
771 | 770 | % (path, stat[0], stat[1]) |
|
772 | 771 | ) |
|
773 | 772 | elif not ui.quiet: |
|
774 | 773 | # a summary for all files |
|
775 | 774 | stats = chunkstats.values() |
|
776 | 775 | applied, total = (sum(s[i] for s in stats) for i in (0, 1)) |
|
777 | 776 | ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total)) |
|
778 | 777 | |
|
779 | 778 | def _commitstack(self): |
|
780 | 779 | """make new commits. update self.finalnode, self.replacemap. |
|
781 | 780 | it is split from "commit" to avoid too much indentation. |
|
782 | 781 | """ |
|
783 | 782 | # last node (20-char) committed by us |
|
784 | 783 | lastcommitted = None |
|
785 | 784 | # p1 which overrides the parent of the next commit, "None" means use |
|
786 | 785 | # the original parent unchanged |
|
787 | 786 | nextp1 = None |
|
788 | 787 | for ctx in self.stack: |
|
789 | 788 | memworkingcopy = self._getnewfilecontents(ctx) |
|
790 | 789 | if not memworkingcopy and not lastcommitted: |
|
791 | 790 | # nothing changed, nothing committed |
|
792 | 791 | nextp1 = ctx |
|
793 | 792 | continue |
|
794 | 793 | willbecomenoop = ctx.files() and self._willbecomenoop( |
|
795 | 794 | memworkingcopy, ctx, nextp1 |
|
796 | 795 | ) |
|
797 | 796 | if self.skip_empty_successor and willbecomenoop: |
|
798 | 797 | # changeset is no longer necessary |
|
799 | 798 | self.replacemap[ctx.node()] = None |
|
800 | 799 | msg = _(b'became empty and was dropped') |
|
801 | 800 | else: |
|
802 | 801 | # changeset needs re-commit |
|
803 | 802 | nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1) |
|
804 | 803 | lastcommitted = self.repo[nodestr] |
|
805 | 804 | nextp1 = lastcommitted |
|
806 | 805 | self.replacemap[ctx.node()] = lastcommitted.node() |
|
807 | 806 | if memworkingcopy: |
|
808 | 807 | if willbecomenoop: |
|
809 | 808 | msg = _(b'%d file(s) changed, became empty as %s') |
|
810 | 809 | else: |
|
811 | 810 | msg = _(b'%d file(s) changed, became %s') |
|
812 | 811 | msg = msg % ( |
|
813 | 812 | len(memworkingcopy), |
|
814 | 813 | self._ctx2str(lastcommitted), |
|
815 | 814 | ) |
|
816 | 815 | else: |
|
817 | 816 | msg = _(b'became %s') % self._ctx2str(lastcommitted) |
|
818 | 817 | if self.ui.verbose and msg: |
|
819 | 818 | self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg)) |
|
820 | 819 | self.finalnode = lastcommitted and lastcommitted.node() |
|
821 | 820 | |
|
822 | 821 | def _ctx2str(self, ctx): |
|
823 | 822 | if self.ui.debugflag: |
|
824 | 823 | return b'%d:%s' % (ctx.rev(), ctx.hex()) |
|
825 | 824 | else: |
|
826 | 825 | return b'%d:%s' % (ctx.rev(), short(ctx.node())) |
|
827 | 826 | |
|
828 | 827 | def _getnewfilecontents(self, ctx): |
|
829 | 828 | """(ctx) -> {path: str} |
|
830 | 829 | |
|
831 | 830 | fetch file contents from filefixupstates. |
|
832 | 831 | return the working copy overrides - files different from ctx. |
|
833 | 832 | """ |
|
834 | 833 | result = {} |
|
835 | 834 | for path in self.paths: |
|
836 | 835 | ctx2fctx = self.fctxmap[path] # {ctx: fctx} |
|
837 | 836 | if ctx not in ctx2fctx: |
|
838 | 837 | continue |
|
839 | 838 | fctx = ctx2fctx[ctx] |
|
840 | 839 | content = fctx.data() |
|
841 | 840 | newcontent = self.fixupmap[path].getfinalcontent(fctx) |
|
842 | 841 | if content != newcontent: |
|
843 | 842 | result[fctx.path()] = newcontent |
|
844 | 843 | return result |
|
845 | 844 | |
|
846 | 845 | def _movebookmarks(self, tr): |
|
847 | 846 | repo = self.repo |
|
848 | 847 | needupdate = [ |
|
849 | 848 | (name, self.replacemap[hsh]) |
|
850 | 849 | for name, hsh in pycompat.iteritems(repo._bookmarks) |
|
851 | 850 | if hsh in self.replacemap |
|
852 | 851 | ] |
|
853 | 852 | changes = [] |
|
854 | 853 | for name, hsh in needupdate: |
|
855 | 854 | if hsh: |
|
856 | 855 | changes.append((name, hsh)) |
|
857 | 856 | if self.ui.verbose: |
|
858 | 857 | self.ui.write( |
|
859 | 858 | _(b'moving bookmark %s to %s\n') % (name, hex(hsh)) |
|
860 | 859 | ) |
|
861 | 860 | else: |
|
862 | 861 | changes.append((name, None)) |
|
863 | 862 | if self.ui.verbose: |
|
864 | 863 | self.ui.write(_(b'deleting bookmark %s\n') % name) |
|
865 | 864 | repo._bookmarks.applychanges(repo, tr, changes) |
|
866 | 865 | |
|
867 | 866 | def _moveworkingdirectoryparent(self): |
|
868 | 867 | if not self.finalnode: |
|
869 | 868 | # Find the latest not-{obsoleted,stripped} parent. |
|
870 | 869 | revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys()) |
|
871 | 870 | ctx = self.repo[revs.first()] |
|
872 | 871 | self.finalnode = ctx.node() |
|
873 | 872 | else: |
|
874 | 873 | ctx = self.repo[self.finalnode] |
|
875 | 874 | |
|
876 | 875 | dirstate = self.repo.dirstate |
|
877 | 876 | # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to |
|
878 | 877 | # be slow. in absorb's case, no need to invalidate fsmonitorstate. |
|
879 | 878 | noop = lambda: 0 |
|
880 | 879 | restore = noop |
|
881 | 880 | if util.safehasattr(dirstate, '_fsmonitorstate'): |
|
882 | 881 | bak = dirstate._fsmonitorstate.invalidate |
|
883 | 882 | |
|
884 | 883 | def restore(): |
|
885 | 884 | dirstate._fsmonitorstate.invalidate = bak |
|
886 | 885 | |
|
887 | 886 | dirstate._fsmonitorstate.invalidate = noop |
|
888 | 887 | try: |
|
889 | 888 | with dirstate.parentchange(): |
|
890 | 889 | dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths) |
|
891 | 890 | finally: |
|
892 | 891 | restore() |
|
893 | 892 | |
|
894 | 893 | @staticmethod |
|
895 | 894 | def _willbecomenoop(memworkingcopy, ctx, pctx=None): |
|
896 | 895 | """({path: content}, ctx, ctx) -> bool. test if a commit will be noop |
|
897 | 896 | |
|
898 | 897 | if it will become an empty commit (does not change anything, after the |
|
899 | 898 | memworkingcopy overrides), return True. otherwise return False. |
|
900 | 899 | """ |
|
901 | 900 | if not pctx: |
|
902 | 901 | parents = ctx.parents() |
|
903 | 902 | if len(parents) != 1: |
|
904 | 903 | return False |
|
905 | 904 | pctx = parents[0] |
|
906 | 905 | if ctx.branch() != pctx.branch(): |
|
907 | 906 | return False |
|
908 | 907 | if ctx.extra().get(b'close'): |
|
909 | 908 | return False |
|
910 | 909 | # ctx changes more files (not a subset of memworkingcopy) |
|
911 | 910 | if not set(ctx.files()).issubset(set(memworkingcopy)): |
|
912 | 911 | return False |
|
913 | 912 | for path, content in pycompat.iteritems(memworkingcopy): |
|
914 | 913 | if path not in pctx or path not in ctx: |
|
915 | 914 | return False |
|
916 | 915 | fctx = ctx[path] |
|
917 | 916 | pfctx = pctx[path] |
|
918 | 917 | if pfctx.flags() != fctx.flags(): |
|
919 | 918 | return False |
|
920 | 919 | if pfctx.data() != content: |
|
921 | 920 | return False |
|
922 | 921 | return True |
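A toy restatement of the rule with plain dicts (hypothetical data; the real method additionally compares branches, the 'close' extra, and file flags):

```python
parent = {b'a.txt': b'hello\n'}      # pctx contents
overrides = {b'a.txt': b'hello\n'}   # memworkingcopy: fixups restored parent text
changedfiles = {b'a.txt'}            # ctx.files()
becomes_noop = changedfiles <= set(overrides) and all(
    overrides[p] == parent.get(p) for p in overrides
)
assert becomes_noop
```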
|
923 | 922 | |
|
924 | 923 | def _commitsingle(self, memworkingcopy, ctx, p1=None): |
|
925 | 924 | """({path: content}, ctx, node) -> node. make a single commit |
|
926 | 925 | |
|
927 | 926 | the commit is a clone of ctx, with an (optionally) different p1, and |
|
928 | 927 | different file contents replaced by memworkingcopy. |
|
929 | 928 | """ |
|
930 | parents = p1 and (p1, nullid) | |
|
929 | parents = p1 and (p1, self.repo.nullid) | |
|
931 | 930 | extra = ctx.extra() |
|
932 | 931 | if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'): |
|
933 | 932 | extra[b'absorb_source'] = ctx.hex() |
|
934 | 933 | |
|
935 | 934 | desc = rewriteutil.update_hash_refs( |
|
936 | 935 | ctx.repo(), |
|
937 | 936 | ctx.description(), |
|
938 | 937 | { |
|
939 | 938 | oldnode: [newnode] |
|
940 | 939 | for oldnode, newnode in self.replacemap.items() |
|
941 | 940 | }, |
|
942 | 941 | ) |
|
943 | 942 | mctx = overlaycontext( |
|
944 | 943 | memworkingcopy, ctx, parents, extra=extra, desc=desc |
|
945 | 944 | ) |
|
946 | 945 | return mctx.commit() |
|
947 | 946 | |
|
948 | 947 | @util.propertycache |
|
949 | 948 | def _useobsolete(self): |
|
950 | 949 | """() -> bool""" |
|
951 | 950 | return obsolete.isenabled(self.repo, obsolete.createmarkersopt) |
|
952 | 951 | |
|
953 | 952 | def _cleanupoldcommits(self): |
|
954 | 953 | replacements = { |
|
955 | 954 | k: ([v] if v is not None else []) |
|
956 | 955 | for k, v in pycompat.iteritems(self.replacemap) |
|
957 | 956 | } |
|
958 | 957 | if replacements: |
|
959 | 958 | scmutil.cleanupnodes( |
|
960 | 959 | self.repo, replacements, operation=b'absorb', fixphase=True |
|
961 | 960 | ) |
|
962 | 961 | |
|
963 | 962 | @util.propertycache |
|
964 | 963 | def skip_empty_successor(self): |
|
965 | 964 | return rewriteutil.skip_empty_successor(self.ui, b'absorb') |
|
966 | 965 | |
|
967 | 966 | |
|
968 | 967 | def _parsechunk(hunk): |
|
969 | 968 | """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))""" |
|
970 | 969 | if type(hunk) not in (crecord.uihunk, patch.recordhunk): |
|
971 | 970 | return None, None |
|
972 | 971 | path = hunk.header.filename() |
|
973 | 972 | a1 = hunk.fromline + len(hunk.before) - 1 |
|
974 | 973 | # remove before and after context |
|
975 | 974 | hunk.before = hunk.after = [] |
|
976 | 975 | buf = util.stringio() |
|
977 | 976 | hunk.write(buf) |
|
978 | 977 | patchlines = mdiff.splitnewlines(buf.getvalue()) |
|
979 | 978 | # hunk.prettystr() will update hunk.removed |
|
980 | 979 | a2 = a1 + hunk.removed |
|
981 | 980 | blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')] |
|
982 | 981 | return path, (a1, a2, blines) |
|
983 | 982 | |
|
984 | 983 | |
|
985 | 984 | def overlaydiffcontext(ctx, chunks): |
|
986 | 985 | """(ctx, [crecord.uihunk]) -> memctx |
|
987 | 986 | |
|
988 | 987 | return a memctx with some [1] patches (chunks) applied to ctx. |
|
989 | 988 | [1]: modifications are handled. renames, mode changes, etc. are ignored. |
|
990 | 989 | """ |
|
991 | 990 | # sadly the applying-patch logic is hardly reusable, and messy: |
|
992 | 991 | # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it |
|
993 | 992 | # needs a file stream of a patch and will re-parse it, while we have |
|
994 | 993 | # structured hunk objects at hand. |
|
995 | 994 | # 2. a lot of different implementations about "chunk" (patch.hunk, |
|
996 | 995 | # patch.recordhunk, crecord.uihunk) |
|
997 | 996 | # as we only care about applying changes to modified files, no mode |
|
998 | 997 | # change, no binary diff, and no renames, it's probably okay to |
|
999 | 998 | # re-invent the logic using much simpler code here. |
|
1000 | 999 | memworkingcopy = {} # {path: content} |
|
1001 | 1000 | patchmap = defaultdict(lambda: []) # {path: [(a1, a2, [bline])]} |
|
1002 | 1001 | for path, info in map(_parsechunk, chunks): |
|
1003 | 1002 | if not path or not info: |
|
1004 | 1003 | continue |
|
1005 | 1004 | patchmap[path].append(info) |
|
1006 | 1005 | for path, patches in pycompat.iteritems(patchmap): |
|
1007 | 1006 | if path not in ctx or not patches: |
|
1008 | 1007 | continue |
|
1009 | 1008 | patches.sort(reverse=True) |
|
1010 | 1009 | lines = mdiff.splitnewlines(ctx[path].data()) |
|
1011 | 1010 | for a1, a2, blines in patches: |
|
1012 | 1011 | lines[a1:a2] = blines |
|
1013 | 1012 | memworkingcopy[path] = b''.join(lines) |
|
1014 | 1013 | return overlaycontext(memworkingcopy, ctx) |
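The splice loop above is the whole patching trick: sort hunks by descending offset so earlier indexes stay valid while later ranges are replaced. A standalone sketch with made-up data:

```python
lines = [b'a\n', b'b\n', b'c\n', b'd\n']
patches = [(1, 2, [b'B\n']), (3, 4, [])]   # (a1, a2, replacement lines)
for a1, a2, blines in sorted(patches, reverse=True):
    lines[a1:a2] = blines
assert lines == [b'a\n', b'B\n', b'c\n']
```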
|
1015 | 1014 | |
|
1016 | 1015 | |
|
1017 | 1016 | def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None): |
|
1018 | 1017 | """pick fixup chunks from targetctx, apply them to stack. |
|
1019 | 1018 | |
|
1020 | 1019 | if targetctx is None, the working copy context will be used. |
|
1021 | 1020 | if stack is None, the current draft stack will be used. |
|
1022 | 1021 | return fixupstate. |
|
1023 | 1022 | """ |
|
1024 | 1023 | if stack is None: |
|
1025 | 1024 | limit = ui.configint(b'absorb', b'max-stack-size') |
|
1026 | 1025 | headctx = repo[b'.'] |
|
1027 | 1026 | if len(headctx.parents()) > 1: |
|
1028 | 1027 | raise error.InputError(_(b'cannot absorb into a merge')) |
|
1029 | 1028 | stack = getdraftstack(headctx, limit) |
|
1030 | 1029 | if limit and len(stack) >= limit: |
|
1031 | 1030 | ui.warn( |
|
1032 | 1031 | _( |
|
1033 | 1032 | b'absorb: only the recent %d changesets will ' |
|
1034 | 1033 | b'be analysed\n' |
|
1035 | 1034 | ) |
|
1036 | 1035 | % limit |
|
1037 | 1036 | ) |
|
1038 | 1037 | if not stack: |
|
1039 | 1038 | raise error.InputError(_(b'no mutable changeset to change')) |
|
1040 | 1039 | if targetctx is None: # default to working copy |
|
1041 | 1040 | targetctx = repo[None] |
|
1042 | 1041 | if pats is None: |
|
1043 | 1042 | pats = () |
|
1044 | 1043 | if opts is None: |
|
1045 | 1044 | opts = {} |
|
1046 | 1045 | state = fixupstate(stack, ui=ui, opts=opts) |
|
1047 | 1046 | matcher = scmutil.match(targetctx, pats, opts) |
|
1048 | 1047 | if opts.get(b'interactive'): |
|
1049 | 1048 | diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher) |
|
1050 | 1049 | origchunks = patch.parsepatch(diff) |
|
1051 | 1050 | chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0] |
|
1052 | 1051 | targetctx = overlaydiffcontext(stack[-1], chunks) |
|
1053 | 1052 | fm = None |
|
1054 | 1053 | if opts.get(b'print_changes') or not opts.get(b'apply_changes'): |
|
1055 | 1054 | fm = ui.formatter(b'absorb', opts) |
|
1056 | 1055 | state.diffwith(targetctx, matcher, fm) |
|
1057 | 1056 | if fm is not None: |
|
1058 | 1057 | fm.startitem() |
|
1059 | 1058 | fm.write( |
|
1060 | 1059 | b"count", b"\n%d changesets affected\n", len(state.ctxaffected) |
|
1061 | 1060 | ) |
|
1062 | 1061 | fm.data(linetype=b'summary') |
|
1063 | 1062 | for ctx in reversed(stack): |
|
1064 | 1063 | if ctx not in state.ctxaffected: |
|
1065 | 1064 | continue |
|
1066 | 1065 | fm.startitem() |
|
1067 | 1066 | fm.context(ctx=ctx) |
|
1068 | 1067 | fm.data(linetype=b'changeset') |
|
1069 | 1068 | fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node') |
|
1070 | 1069 | descfirstline = ctx.description().splitlines()[0] |
|
1071 | 1070 | fm.write( |
|
1072 | 1071 | b'descfirstline', |
|
1073 | 1072 | b'%s\n', |
|
1074 | 1073 | descfirstline, |
|
1075 | 1074 | label=b'absorb.description', |
|
1076 | 1075 | ) |
|
1077 | 1076 | fm.end() |
|
1078 | 1077 | if not opts.get(b'dry_run'): |
|
1079 | 1078 | if ( |
|
1080 | 1079 | not opts.get(b'apply_changes') |
|
1081 | 1080 | and state.ctxaffected |
|
1082 | 1081 | and ui.promptchoice( |
|
1083 | 1082 | b"apply changes (y/N)? $$ &Yes $$ &No", default=1 |
|
1084 | 1083 | ) |
|
1085 | 1084 | ): |
|
1086 | 1085 | raise error.CanceledError(_(b'absorb cancelled\n')) |
|
1087 | 1086 | |
|
1088 | 1087 | state.apply() |
|
1089 | 1088 | if state.commit(): |
|
1090 | 1089 | state.printchunkstats() |
|
1091 | 1090 | elif not ui.quiet: |
|
1092 | 1091 | ui.write(_(b'nothing applied\n')) |
|
1093 | 1092 | return state |
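absorb() also doubles as the programmatic entry point that absorbcmd wraps below; a hedged sketch of driving it from other extension code (locks and error handling elided, and note that opts uses bytes keys):

```python
with repo.wlock(), repo.lock():
    state = absorb(ui, repo, pats=(), opts={b'apply_changes': True})
# state.chunkstats maps path -> [applied, total]
```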
|
1094 | 1093 | |
|
1095 | 1094 | |
|
1096 | 1095 | @command( |
|
1097 | 1096 | b'absorb', |
|
1098 | 1097 | [ |
|
1099 | 1098 | ( |
|
1100 | 1099 | b'a', |
|
1101 | 1100 | b'apply-changes', |
|
1102 | 1101 | None, |
|
1103 | 1102 | _(b'apply changes without prompting for confirmation'), |
|
1104 | 1103 | ), |
|
1105 | 1104 | ( |
|
1106 | 1105 | b'p', |
|
1107 | 1106 | b'print-changes', |
|
1108 | 1107 | None, |
|
1109 | 1108 | _(b'always print which changesets are modified by which changes'), |
|
1110 | 1109 | ), |
|
1111 | 1110 | ( |
|
1112 | 1111 | b'i', |
|
1113 | 1112 | b'interactive', |
|
1114 | 1113 | None, |
|
1115 | 1114 | _(b'interactively select which chunks to apply'), |
|
1116 | 1115 | ), |
|
1117 | 1116 | ( |
|
1118 | 1117 | b'e', |
|
1119 | 1118 | b'edit-lines', |
|
1120 | 1119 | None, |
|
1121 | 1120 | _( |
|
1122 | 1121 | b'edit what lines belong to which changesets before commit ' |
|
1123 | 1122 | b'(EXPERIMENTAL)' |
|
1124 | 1123 | ), |
|
1125 | 1124 | ), |
|
1126 | 1125 | ] |
|
1127 | 1126 | + commands.dryrunopts |
|
1128 | 1127 | + commands.templateopts |
|
1129 | 1128 | + commands.walkopts, |
|
1130 | 1129 | _(b'hg absorb [OPTION] [FILE]...'), |
|
1131 | 1130 | helpcategory=command.CATEGORY_COMMITTING, |
|
1132 | 1131 | helpbasic=True, |
|
1133 | 1132 | ) |
|
1134 | 1133 | def absorbcmd(ui, repo, *pats, **opts): |
|
1135 | 1134 | """incorporate corrections into the stack of draft changesets |
|
1136 | 1135 | |
|
1137 | 1136 | absorb analyzes each change in your working directory and attempts to |
|
1138 | 1137 | amend the changed lines into the changesets in your stack that first |
|
1139 | 1138 | introduced those lines. |
|
1140 | 1139 | |
|
1141 | 1140 | If absorb cannot find an unambiguous changeset to amend for a change, |
|
1142 | 1141 | that change will be left in the working directory, untouched. Such changes |

1143 | 1142 | can be observed by :hg:`status` or :hg:`diff` afterwards. In other words, |
|
1144 | 1143 | absorb does not write to the working directory. |
|
1145 | 1144 | |
|
1146 | 1145 | Changesets outside the revset `::. and not public() and not merge()` will |
|
1147 | 1146 | not be changed. |
|
1148 | 1147 | |
|
1149 | 1148 | Changesets that become empty after applying the changes will be deleted. |
|
1150 | 1149 | |
|
1151 | 1150 | By default, absorb will show what it plans to do and prompt for |
|
1152 | 1151 | confirmation. If you are confident that the changes will be absorbed |
|
1153 | 1152 | to the correct place, run :hg:`absorb -a` to apply the changes |
|
1154 | 1153 | immediately. |
|
1155 | 1154 | |
|
1156 | 1155 | Returns 0 on success, 1 if all chunks were ignored and nothing amended. |
|
1157 | 1156 | """ |
|
1158 | 1157 | opts = pycompat.byteskwargs(opts) |
|
1159 | 1158 | |
|
1160 | 1159 | with repo.wlock(), repo.lock(): |
|
1161 | 1160 | if not opts[b'dry_run']: |
|
1162 | 1161 | cmdutil.checkunfinished(repo) |
|
1163 | 1162 | |
|
1164 | 1163 | state = absorb(ui, repo, pats=pats, opts=opts) |
|
1165 | 1164 | if sum(s[0] for s in state.chunkstats.values()) == 0: |
|
1166 | 1165 | return 1 |
@@ -1,531 +1,531 b'' | |||
|
1 | 1 | # git.py - git support for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import os |
|
10 | 10 | |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | from mercurial.node import nullhex | |

12 | from mercurial.node import sha1nodeconstants | |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | config, |
|
15 | 15 | error, |
|
16 | 16 | pycompat, |
|
17 | 17 | ) |
|
18 | 18 | |
|
19 | 19 | from . import common |
|
20 | 20 | |
|
21 | 21 | |
|
22 | 22 | class submodule(object): |
|
23 | 23 | def __init__(self, path, node, url): |
|
24 | 24 | self.path = path |
|
25 | 25 | self.node = node |
|
26 | 26 | self.url = url |
|
27 | 27 | |
|
28 | 28 | def hgsub(self): |
|
29 | 29 | return b"%s = [git]%s" % (self.path, self.url) |
|
30 | 30 | |
|
31 | 31 | def hgsubstate(self): |
|
32 | 32 | return b"%s %s" % (self.node, self.path) |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | # Keys in extra fields that should not be copied if the user requests. |
|
36 | 36 | bannedextrakeys = { |
|
37 | 37 | # Git commit object built-ins. |
|
38 | 38 | b'tree', |
|
39 | 39 | b'parent', |
|
40 | 40 | b'author', |
|
41 | 41 | b'committer', |
|
42 | 42 | # Mercurial built-ins. |
|
43 | 43 | b'branch', |
|
44 | 44 | b'close', |
|
45 | 45 | } |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | class convert_git(common.converter_source, common.commandline): |
|
49 | 49 | # Windows does not support GIT_DIR= construct while other systems |
|
50 | 50 | # cannot remove environment variable. Just assume none have |
|
51 | 51 | # both issues. |
|
52 | 52 | |
|
53 | 53 | def _gitcmd(self, cmd, *args, **kwargs): |
|
54 | 54 | return cmd(b'--git-dir=%s' % self.path, *args, **kwargs) |
|
55 | 55 | |
|
56 | 56 | def gitrun0(self, *args, **kwargs): |
|
57 | 57 | return self._gitcmd(self.run0, *args, **kwargs) |
|
58 | 58 | |
|
59 | 59 | def gitrun(self, *args, **kwargs): |
|
60 | 60 | return self._gitcmd(self.run, *args, **kwargs) |
|
61 | 61 | |
|
62 | 62 | def gitrunlines0(self, *args, **kwargs): |
|
63 | 63 | return self._gitcmd(self.runlines0, *args, **kwargs) |
|
64 | 64 | |
|
65 | 65 | def gitrunlines(self, *args, **kwargs): |
|
66 | 66 | return self._gitcmd(self.runlines, *args, **kwargs) |
|
67 | 67 | |
|
68 | 68 | def gitpipe(self, *args, **kwargs): |
|
69 | 69 | return self._gitcmd(self._run3, *args, **kwargs) |
|
70 | 70 | |
|
71 | 71 | def __init__(self, ui, repotype, path, revs=None): |
|
72 | 72 | super(convert_git, self).__init__(ui, repotype, path, revs=revs) |
|
73 | 73 | common.commandline.__init__(self, ui, b'git') |
|
74 | 74 | |
|
75 | 75 | # Pass an absolute path to git to prevent from ever being interpreted |
|
76 | 76 | # as a URL |
|
77 | 77 | path = os.path.abspath(path) |
|
78 | 78 | |
|
79 | 79 | if os.path.isdir(path + b"/.git"): |
|
80 | 80 | path += b"/.git" |
|
81 | 81 | if not os.path.exists(path + b"/objects"): |
|
82 | 82 | raise common.NoRepo( |
|
83 | 83 | _(b"%s does not look like a Git repository") % path |
|
84 | 84 | ) |
|
85 | 85 | |
|
86 | 86 | # The default value (50) is based on the default for 'git diff'. |
|
87 | 87 | similarity = ui.configint(b'convert', b'git.similarity') |
|
88 | 88 | if similarity < 0 or similarity > 100: |
|
89 | 89 | raise error.Abort(_(b'similarity must be between 0 and 100')) |
|
90 | 90 | if similarity > 0: |
|
91 | 91 | self.simopt = [b'-C%d%%' % similarity] |
|
92 | 92 | findcopiesharder = ui.configbool( |
|
93 | 93 | b'convert', b'git.findcopiesharder' |
|
94 | 94 | ) |
|
95 | 95 | if findcopiesharder: |
|
96 | 96 | self.simopt.append(b'--find-copies-harder') |
|
97 | 97 | |
|
98 | 98 | renamelimit = ui.configint(b'convert', b'git.renamelimit') |
|
99 | 99 | self.simopt.append(b'-l%d' % renamelimit) |
|
100 | 100 | else: |
|
101 | 101 | self.simopt = [] |
|
102 | 102 | |
|
103 | 103 | common.checktool(b'git', b'git') |
|
104 | 104 | |
|
105 | 105 | self.path = path |
|
106 | 106 | self.submodules = [] |
|
107 | 107 | |
|
108 | 108 | self.catfilepipe = self.gitpipe(b'cat-file', b'--batch') |
|
109 | 109 | |
|
110 | 110 | self.copyextrakeys = self.ui.configlist(b'convert', b'git.extrakeys') |
|
111 | 111 | banned = set(self.copyextrakeys) & bannedextrakeys |
|
112 | 112 | if banned: |
|
113 | 113 | raise error.Abort( |
|
114 | 114 | _(b'copying of extra key is forbidden: %s') |
|
115 | 115 | % _(b', ').join(sorted(banned)) |
|
116 | 116 | ) |
|
117 | 117 | |
|
118 | 118 | committeractions = self.ui.configlist( |
|
119 | 119 | b'convert', b'git.committeractions' |
|
120 | 120 | ) |
|
121 | 121 | |
|
122 | 122 | messagedifferent = None |
|
123 | 123 | messagealways = None |
|
124 | 124 | for a in committeractions: |
|
125 | 125 | if a.startswith((b'messagedifferent', b'messagealways')): |
|
126 | 126 | k = a |
|
127 | 127 | v = None |
|
128 | 128 | if b'=' in a: |
|
129 | 129 | k, v = a.split(b'=', 1) |
|
130 | 130 | |
|
131 | 131 | if k == b'messagedifferent': |
|
132 | 132 | messagedifferent = v or b'committer:' |
|
133 | 133 | elif k == b'messagealways': |
|
134 | 134 | messagealways = v or b'committer:' |
|
135 | 135 | |
|
136 | 136 | if messagedifferent and messagealways: |
|
137 | 137 | raise error.Abort( |
|
138 | 138 | _( |
|
139 | 139 | b'committeractions cannot define both ' |
|
140 | 140 | b'messagedifferent and messagealways' |
|
141 | 141 | ) |
|
142 | 142 | ) |
|
143 | 143 | |
|
144 | 144 | dropcommitter = b'dropcommitter' in committeractions |
|
145 | 145 | replaceauthor = b'replaceauthor' in committeractions |
|
146 | 146 | |
|
147 | 147 | if dropcommitter and replaceauthor: |
|
148 | 148 | raise error.Abort( |
|
149 | 149 | _( |
|
150 | 150 | b'committeractions cannot define both ' |
|
151 | 151 | b'dropcommitter and replaceauthor' |
|
152 | 152 | ) |
|
153 | 153 | ) |
|
154 | 154 | |
|
155 | 155 | if dropcommitter and messagealways: |
|
156 | 156 | raise error.Abort( |
|
157 | 157 | _( |
|
158 | 158 | b'committeractions cannot define both ' |
|
159 | 159 | b'dropcommitter and messagealways' |
|
160 | 160 | ) |
|
161 | 161 | ) |
|
162 | 162 | |
|
163 | 163 | if not messagedifferent and not messagealways: |
|
164 | 164 | messagedifferent = b'committer:' |
|
165 | 165 | |
|
166 | 166 | self.committeractions = { |
|
167 | 167 | b'dropcommitter': dropcommitter, |
|
168 | 168 | b'replaceauthor': replaceauthor, |
|
169 | 169 | b'messagedifferent': messagedifferent, |
|
170 | 170 | b'messagealways': messagealways, |
|
171 | 171 | } |
|
172 | 172 | |
|
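
A concrete configuration makes the validation above easier to follow. ``convert.git.committeractions`` is a comma-separated list; ``messagedifferent`` and ``messagealways`` take an optional ``=prefix`` (default ``committer:``), and the pairs rejected above are exactly the combinations that would contradict one another. An illustrative setup (the values here are examples, not defaults)::

    [convert]
    # append "committer: <who>" to the message only when committer != author
    git.committeractions = messagedifferent
    # or: rewrite the author and always record the committer under a prefix
    # git.committeractions = replaceauthor, messagealways=Committed-by:
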
173 | 173 | def after(self): |
|
174 | 174 | for f in self.catfilepipe: |
|
175 | 175 | f.close() |
|
176 | 176 | |
|
177 | 177 | def getheads(self): |
|
178 | 178 | if not self.revs: |
|
179 | 179 | output, status = self.gitrun( |
|
180 | 180 | b'rev-parse', b'--branches', b'--remotes' |
|
181 | 181 | ) |
|
182 | 182 | heads = output.splitlines() |
|
183 | 183 | if status: |
|
184 | 184 | raise error.Abort(_(b'cannot retrieve git heads')) |
|
185 | 185 | else: |
|
186 | 186 | heads = [] |
|
187 | 187 | for rev in self.revs: |
|
188 | 188 | rawhead, ret = self.gitrun(b'rev-parse', b'--verify', rev) |
|
189 | 189 | heads.append(rawhead[:-1]) |
|
190 | 190 | if ret: |
|
191 | 191 | raise error.Abort(_(b'cannot retrieve git head "%s"') % rev) |
|
192 | 192 | return heads |
|
193 | 193 | |
|
194 | 194 | def catfile(self, rev, ftype): |
|
195 | if rev == nullhex: | |
|
195 | if rev == sha1nodeconstants.nullhex: | |
|
196 | 196 | raise IOError |
|
197 | 197 | self.catfilepipe[0].write(rev + b'\n') |
|
198 | 198 | self.catfilepipe[0].flush() |
|
199 | 199 | info = self.catfilepipe[1].readline().split() |
|
200 | 200 | if info[1] != ftype: |
|
201 | 201 | raise error.Abort( |
|
202 | 202 | _(b'cannot read %r object at %s') |
|
203 | 203 | % (pycompat.bytestr(ftype), rev) |
|
204 | 204 | ) |
|
205 | 205 | size = int(info[2]) |
|
206 | 206 | data = self.catfilepipe[1].read(size) |
|
207 | 207 | if len(data) < size: |
|
208 | 208 | raise error.Abort( |
|
209 | 209 | _(b'cannot read %r object at %s: unexpected size') |
|
210 | 210 | % (ftype, rev) |
|
211 | 211 | ) |
|
212 | 212 | # read the trailing newline |
|
213 | 213 | self.catfilepipe[1].read(1) |
|
214 | 214 | return data |
|
215 | 215 | |
|
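
The pipe set up in ``__init__`` speaks git's batch object protocol: ``git cat-file --batch`` reads one object name per input line and replies with a ``<sha> <type> <size>`` header line, ``<size>`` bytes of content, and one trailing newline, which is what the extra ``read(1)`` above consumes. A minimal standalone sketch of the same exchange (illustrative only; the converter keeps a single long-lived pipe open through its commandline helper instead)::

    import subprocess

    def catfile_once(gitdir, rev):
        proc = subprocess.Popen(
            ['git', '--git-dir=%s' % gitdir, 'cat-file', '--batch'],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        proc.stdin.write(rev.encode('ascii') + b'\n')
        proc.stdin.flush()
        sha, objtype, size = proc.stdout.readline().split()
        data = proc.stdout.read(int(size))
        proc.stdout.read(1)  # every object is terminated by a newline
        proc.stdin.close()
        proc.wait()
        return objtype, data
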
216 | 216 | def getfile(self, name, rev): |
|
217 | if rev == nullhex: | |
|
217 | if rev == sha1nodeconstants.nullhex: | |
|
218 | 218 | return None, None |
|
219 | 219 | if name == b'.hgsub': |
|
220 | 220 | data = b'\n'.join([m.hgsub() for m in self.submoditer()]) |
|
221 | 221 | mode = b'' |
|
222 | 222 | elif name == b'.hgsubstate': |
|
223 | 223 | data = b'\n'.join([m.hgsubstate() for m in self.submoditer()]) |
|
224 | 224 | mode = b'' |
|
225 | 225 | else: |
|
226 | 226 | data = self.catfile(rev, b"blob") |
|
227 | 227 | mode = self.modecache[(name, rev)] |
|
228 | 228 | return data, mode |
|
229 | 229 | |
|
230 | 230 | def submoditer(self): |
|
231 | null = nullhex | |
|
231 | null = sha1nodeconstants.nullhex | |
|
232 | 232 | for m in sorted(self.submodules, key=lambda p: p.path): |
|
233 | 233 | if m.node != null: |
|
234 | 234 | yield m |
|
235 | 235 | |
|
236 | 236 | def parsegitmodules(self, content): |
|
237 | 237 | """Parse the formatted .gitmodules file, example file format: |
|
238 | 238 | [submodule "sub"]\n |
|
239 | 239 | \tpath = sub\n |
|
240 | 240 | \turl = git://giturl\n |
|
241 | 241 | """ |
|
242 | 242 | self.submodules = [] |
|
243 | 243 | c = config.config() |
|
244 | 244 | # Each item in .gitmodules starts with whitespace that can't be parsed |
|
245 | 245 | c.parse( |
|
246 | 246 | b'.gitmodules', |
|
247 | 247 | b'\n'.join(line.strip() for line in content.split(b'\n')), |
|
248 | 248 | ) |
|
249 | 249 | for sec in c.sections(): |
|
250 | 250 | # turn the config object into a real dict |
|
251 | 251 | s = dict(c.items(sec)) |
|
252 | 252 | if b'url' in s and b'path' in s: |
|
253 | 253 | self.submodules.append(submodule(s[b'path'], b'', s[b'url'])) |
|
254 | 254 | |
|
255 | 255 | def retrievegitmodules(self, version): |
|
256 | 256 | modules, ret = self.gitrun( |
|
257 | 257 | b'show', b'%s:%s' % (version, b'.gitmodules') |
|
258 | 258 | ) |
|
259 | 259 | if ret: |
|
260 | 260 | # This can happen if a file is in the repo that has permissions |
|
261 | 261 | # 160000, but there is no .gitmodules file. |
|
262 | 262 | self.ui.warn( |
|
263 | 263 | _(b"warning: cannot read submodules config file in %s\n") |
|
264 | 264 | % version |
|
265 | 265 | ) |
|
266 | 266 | return |
|
267 | 267 | |
|
268 | 268 | try: |
|
269 | 269 | self.parsegitmodules(modules) |
|
270 | 270 | except error.ParseError: |
|
271 | 271 | self.ui.warn( |
|
272 | 272 | _(b"warning: unable to parse .gitmodules in %s\n") % version |
|
273 | 273 | ) |
|
274 | 274 | return |
|
275 | 275 | |
|
276 | 276 | for m in self.submodules: |
|
277 | 277 | node, ret = self.gitrun(b'rev-parse', b'%s:%s' % (version, m.path)) |
|
278 | 278 | if ret: |
|
279 | 279 | continue |
|
280 | 280 | m.node = node.strip() |
|
281 | 281 | |
|
282 | 282 | def getchanges(self, version, full): |
|
283 | 283 | if full: |
|
284 | 284 | raise error.Abort(_(b"convert from git does not support --full")) |
|
285 | 285 | self.modecache = {} |
|
286 | 286 | cmd = ( |
|
287 | 287 | [b'diff-tree', b'-z', b'--root', b'-m', b'-r'] |
|
288 | 288 | + self.simopt |
|
289 | 289 | + [version] |
|
290 | 290 | ) |
|
291 | 291 | output, status = self.gitrun(*cmd) |
|
292 | 292 | if status: |
|
293 | 293 | raise error.Abort(_(b'cannot read changes in %s') % version) |
|
294 | 294 | changes = [] |
|
295 | 295 | copies = {} |
|
296 | 296 | seen = set() |
|
297 | 297 | entry = None |
|
298 | 298 | subexists = [False] |
|
299 | 299 | subdeleted = [False] |
|
300 | 300 | difftree = output.split(b'\x00') |
|
301 | 301 | lcount = len(difftree) |
|
302 | 302 | i = 0 |
|
303 | 303 | |
|
304 | 304 | skipsubmodules = self.ui.configbool(b'convert', b'git.skipsubmodules') |
|
305 | 305 | |
|
306 | 306 | def add(entry, f, isdest): |
|
307 | 307 | seen.add(f) |
|
308 | 308 | h = entry[3] |
|
309 | 309 | p = entry[1] == b"100755" |
|
310 | 310 | s = entry[1] == b"120000" |
|
311 | 311 | renamesource = not isdest and entry[4][0] == b'R' |
|
312 | 312 | |
|
313 | 313 | if f == b'.gitmodules': |
|
314 | 314 | if skipsubmodules: |
|
315 | 315 | return |
|
316 | 316 | |
|
317 | 317 | subexists[0] = True |
|
318 | 318 | if entry[4] == b'D' or renamesource: |
|
319 | 319 | subdeleted[0] = True |
|
320 | changes.append((b'.hgsub', nullhex)) | |
|
320 | changes.append((b'.hgsub', sha1nodeconstants.nullhex)) | |
|
321 | 321 | else: |
|
322 | 322 | changes.append((b'.hgsub', b'')) |
|
323 | 323 | elif entry[1] == b'160000' or entry[0] == b':160000': |
|
324 | 324 | if not skipsubmodules: |
|
325 | 325 | subexists[0] = True |
|
326 | 326 | else: |
|
327 | 327 | if renamesource: |
|
328 | h = nullhex | |
|
328 | h = sha1nodeconstants.nullhex | |
|
329 | 329 | self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b"" |
|
330 | 330 | changes.append((f, h)) |
|
331 | 331 | |
|
332 | 332 | while i < lcount: |
|
333 | 333 | l = difftree[i] |
|
334 | 334 | i += 1 |
|
335 | 335 | if not entry: |
|
336 | 336 | if not l.startswith(b':'): |
|
337 | 337 | continue |
|
338 | 338 | entry = tuple(pycompat.bytestr(p) for p in l.split()) |
|
339 | 339 | continue |
|
340 | 340 | f = l |
|
341 | 341 | if entry[4][0] == b'C': |
|
342 | 342 | copysrc = f |
|
343 | 343 | copydest = difftree[i] |
|
344 | 344 | i += 1 |
|
345 | 345 | f = copydest |
|
346 | 346 | copies[copydest] = copysrc |
|
347 | 347 | if f not in seen: |
|
348 | 348 | add(entry, f, False) |
|
349 | 349 | # A file can be copied multiple times, or modified and copied |
|
350 | 350 | # simultaneously. So f can be repeated even if fdest isn't. |
|
351 | 351 | if entry[4][0] == b'R': |
|
352 | 352 | # rename: next line is the destination |
|
353 | 353 | fdest = difftree[i] |
|
354 | 354 | i += 1 |
|
355 | 355 | if fdest not in seen: |
|
356 | 356 | add(entry, fdest, True) |
|
357 | 357 | # .gitmodules isn't imported at all, so it being copied to |
|
358 | 358 | # and fro doesn't really make sense |
|
359 | 359 | if f != b'.gitmodules' and fdest != b'.gitmodules': |
|
360 | 360 | copies[fdest] = f |
|
361 | 361 | entry = None |
|
362 | 362 | |
|
363 | 363 | if subexists[0]: |
|
364 | 364 | if subdeleted[0]: |
|
365 | changes.append((b'.hgsubstate', nullhex)) | |
|
365 | changes.append((b'.hgsubstate', sha1nodeconstants.nullhex)) | |
|
366 | 366 | else: |
|
367 | 367 | self.retrievegitmodules(version) |
|
368 | 368 | changes.append((b'.hgsubstate', b'')) |
|
369 | 369 | return (changes, copies, set()) |
|
370 | 370 | |
|
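
The parsing loop above walks ``git diff-tree -z`` output, which is a flat NUL-separated stream rather than line-oriented text. Each record is a colon-prefixed metadata entry (source mode, destination mode, source sha, destination sha, status) followed by one path, or by two paths for renames and copies; that layout is why ``entry[1]`` carries the new mode and ``entry[4]`` the status letter. Schematically (shas are placeholders; ``R86`` means a rename detected at 86% similarity)::

    :100644 100644 <src-sha> <dst-sha> M\0modified/path\0
    :100644 100644 <src-sha> <dst-sha> R86\0old/path\0new/path\0
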
371 | 371 | def getcommit(self, version): |
|
372 | 372 | c = self.catfile(version, b"commit") # read the commit hash |
|
373 | 373 | end = c.find(b"\n\n") |
|
374 | 374 | message = c[end + 2 :] |
|
375 | 375 | message = self.recode(message) |
|
376 | 376 | l = c[:end].splitlines() |
|
377 | 377 | parents = [] |
|
378 | 378 | author = committer = None |
|
379 | 379 | extra = {} |
|
380 | 380 | for e in l[1:]: |
|
381 | 381 | n, v = e.split(b" ", 1) |
|
382 | 382 | if n == b"author": |
|
383 | 383 | p = v.split() |
|
384 | 384 | tm, tz = p[-2:] |
|
385 | 385 | author = b" ".join(p[:-2]) |
|
386 | 386 | if author[0] == b"<": |
|
387 | 387 | author = author[1:-1] |
|
388 | 388 | author = self.recode(author) |
|
389 | 389 | if n == b"committer": |
|
390 | 390 | p = v.split() |
|
391 | 391 | tm, tz = p[-2:] |
|
392 | 392 | committer = b" ".join(p[:-2]) |
|
393 | 393 | if committer[0] == b"<": |
|
394 | 394 | committer = committer[1:-1] |
|
395 | 395 | committer = self.recode(committer) |
|
396 | 396 | if n == b"parent": |
|
397 | 397 | parents.append(v) |
|
398 | 398 | if n in self.copyextrakeys: |
|
399 | 399 | extra[n] = v |
|
400 | 400 | |
|
401 | 401 | if self.committeractions[b'dropcommitter']: |
|
402 | 402 | committer = None |
|
403 | 403 | elif self.committeractions[b'replaceauthor']: |
|
404 | 404 | author = committer |
|
405 | 405 | |
|
406 | 406 | if committer: |
|
407 | 407 | messagealways = self.committeractions[b'messagealways'] |
|
408 | 408 | messagedifferent = self.committeractions[b'messagedifferent'] |
|
409 | 409 | if messagealways: |
|
410 | 410 | message += b'\n%s %s\n' % (messagealways, committer) |
|
411 | 411 | elif messagedifferent and author != committer: |
|
412 | 412 | message += b'\n%s %s\n' % (messagedifferent, committer) |
|
413 | 413 | |
|
414 | 414 | tzs, tzh, tzm = tz[-5:-4] + b"1", tz[-4:-2], tz[-2:] |
|
415 | 415 | tz = -int(tzs) * (int(tzh) * 3600 + int(tzm)) |
|
416 | 416 | date = tm + b" " + (b"%d" % tz) |
|
417 | 417 | saverev = self.ui.configbool(b'convert', b'git.saverev') |
|
418 | 418 | |
|
419 | 419 | c = common.commit( |
|
420 | 420 | parents=parents, |
|
421 | 421 | date=date, |
|
422 | 422 | author=author, |
|
423 | 423 | desc=message, |
|
424 | 424 | rev=version, |
|
425 | 425 | extra=extra, |
|
426 | 426 | saverev=saverev, |
|
427 | 427 | ) |
|
428 | 428 | return c |
|
429 | 429 | |
|
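
The timezone arithmetic in ``getcommit`` is terse, so a worked trace helps. Git hands back an offset such as ``-0800``, while Mercurial dates store seconds west of UTC. Following the code exactly (note that, as written, the minutes field is added without a ``* 60`` factor, so whole-hour offsets are the exact case)::

    tm, tz = b'1700000000', b'-0800'
    tzs, tzh, tzm = tz[-5:-4] + b"1", tz[-4:-2], tz[-2:]  # b'-1', b'08', b'00'
    tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))         # -(-1) * 28800 = 28800
    date = tm + b" " + (b"%d" % tz)                       # b'1700000000 28800'
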
430 | 430 | def numcommits(self): |
|
431 | 431 | output, ret = self.gitrunlines(b'rev-list', b'--all') |
|
432 | 432 | if ret: |
|
433 | 433 | raise error.Abort( |
|
434 | 434 | _(b'cannot retrieve number of commits in %s') % self.path |
|
435 | 435 | ) |
|
436 | 436 | return len(output) |
|
437 | 437 | |
|
438 | 438 | def gettags(self): |
|
439 | 439 | tags = {} |
|
440 | 440 | alltags = {} |
|
441 | 441 | output, status = self.gitrunlines(b'ls-remote', b'--tags', self.path) |
|
442 | 442 | |
|
443 | 443 | if status: |
|
444 | 444 | raise error.Abort(_(b'cannot read tags from %s') % self.path) |
|
445 | 445 | prefix = b'refs/tags/' |
|
446 | 446 | |
|
447 | 447 | # Build complete list of tags, both annotated and bare ones |
|
448 | 448 | for line in output: |
|
449 | 449 | line = line.strip() |
|
450 | 450 | if line.startswith(b"error:") or line.startswith(b"fatal:"): |
|
451 | 451 | raise error.Abort(_(b'cannot read tags from %s') % self.path) |
|
452 | 452 | node, tag = line.split(None, 1) |
|
453 | 453 | if not tag.startswith(prefix): |
|
454 | 454 | continue |
|
455 | 455 | alltags[tag[len(prefix) :]] = node |
|
456 | 456 | |
|
457 | 457 | # Filter out tag objects for annotated tag refs |
|
458 | 458 | for tag in alltags: |
|
459 | 459 | if tag.endswith(b'^{}'): |
|
460 | 460 | tags[tag[:-3]] = alltags[tag] |
|
461 | 461 | else: |
|
462 | 462 | if tag + b'^{}' in alltags: |
|
463 | 463 | continue |
|
464 | 464 | else: |
|
465 | 465 | tags[tag] = alltags[tag] |
|
466 | 466 | |
|
467 | 467 | return tags |
|
468 | 468 | |
|
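
For an annotated tag, ``git ls-remote --tags`` reports two refs: the tag object itself and the commit it points to, suffixed ``^{}`` (the "peeled" form). The second loop above prefers the peeled entry, so ``.hgtags`` ends up naming commits rather than tag objects. Illustrative output (hashes invented)::

    a94a8fe5ccb19ba61c4c0873d391e987982fbbd3	refs/tags/v1.0
    9eabf5b536662000f79978c4d1b6e4eff5c8d785	refs/tags/v1.0^{}

which yields ``tags[b'v1.0'] = b'9eabf5b5...'``.
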
469 | 469 | def getchangedfiles(self, version, i): |
|
470 | 470 | changes = [] |
|
471 | 471 | if i is None: |
|
472 | 472 | output, status = self.gitrunlines( |
|
473 | 473 | b'diff-tree', b'--root', b'-m', b'-r', version |
|
474 | 474 | ) |
|
475 | 475 | if status: |
|
476 | 476 | raise error.Abort(_(b'cannot read changes in %s') % version) |
|
477 | 477 | for l in output: |
|
478 | 478 | if b"\t" not in l: |
|
479 | 479 | continue |
|
480 | 480 | m, f = l[:-1].split(b"\t") |
|
481 | 481 | changes.append(f) |
|
482 | 482 | else: |
|
483 | 483 | output, status = self.gitrunlines( |
|
484 | 484 | b'diff-tree', |
|
485 | 485 | b'--name-only', |
|
486 | 486 | b'--root', |
|
487 | 487 | b'-r', |
|
488 | 488 | version, |
|
489 | 489 | b'%s^%d' % (version, i + 1), |
|
490 | 490 | b'--', |
|
491 | 491 | ) |
|
492 | 492 | if status: |
|
493 | 493 | raise error.Abort(_(b'cannot read changes in %s') % version) |
|
494 | 494 | changes = [f.rstrip(b'\n') for f in output] |
|
495 | 495 | |
|
496 | 496 | return changes |
|
497 | 497 | |
|
498 | 498 | def getbookmarks(self): |
|
499 | 499 | bookmarks = {} |
|
500 | 500 | |
|
501 | 501 | # Handle local and remote branches |
|
502 | 502 | remoteprefix = self.ui.config(b'convert', b'git.remoteprefix') |
|
503 | 503 | reftypes = [ |
|
504 | 504 | # (git prefix, hg prefix) |
|
505 | 505 | (b'refs/remotes/origin/', remoteprefix + b'/'), |
|
506 | 506 | (b'refs/heads/', b''), |
|
507 | 507 | ] |
|
508 | 508 | |
|
509 | 509 | exclude = { |
|
510 | 510 | b'refs/remotes/origin/HEAD', |
|
511 | 511 | } |
|
512 | 512 | |
|
513 | 513 | try: |
|
514 | 514 | output, status = self.gitrunlines(b'show-ref') |
|
515 | 515 | for line in output: |
|
516 | 516 | line = line.strip() |
|
517 | 517 | rev, name = line.split(None, 1) |
|
518 | 518 | # Process each type of branch |
|
519 | 519 | for gitprefix, hgprefix in reftypes: |
|
520 | 520 | if not name.startswith(gitprefix) or name in exclude: |
|
521 | 521 | continue |
|
522 | 522 | name = b'%s%s' % (hgprefix, name[len(gitprefix) :]) |
|
523 | 523 | bookmarks[name] = rev |
|
524 | 524 | except Exception: |
|
525 | 525 | pass |
|
526 | 526 | |
|
527 | 527 | return bookmarks |
|
528 | 528 | |
|
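
A small example of the ref-to-bookmark mapping (hashes invented). Given this ``git show-ref`` output::

    1f7bba26ea4f8c58c6e7a07b78a73e7e9c4b4d2e refs/heads/main
    3d0c4b7a9d2e5f61788c9d0a1b2c3d4e5f607182 refs/remotes/origin/feature

the loop produces bookmarks ``main`` and ``remote/feature`` (assuming the default ``convert.git.remoteprefix`` of ``remote``), while ``refs/remotes/origin/HEAD`` is excluded as a symbolic alias.
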
529 | 529 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
530 | 530 | """ git revision string is a 40 byte hex """ |
|
531 | 531 | self.checkhexformat(revstr, mapname) |
@@ -1,733 +1,732 @@
|
1 | 1 | # hg.py - hg backend for convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | # Notes for hg->hg conversion: |
|
9 | 9 | # |
|
10 | 10 | # * Old versions of Mercurial didn't trim the whitespace from the ends |
|
11 | 11 | # of commit messages, but new versions do. Changesets created by |
|
12 | 12 | # those older versions, then converted, may thus have different |
|
13 | 13 | # hashes for changesets that are otherwise identical. |
|
14 | 14 | # |
|
15 | 15 | # * Using "--config convert.hg.saverev=true" will make the source |
|
16 | 16 | # identifier to be stored in the converted revision. This will cause |
|
17 | 17 | # the converted revision to have a different identity than the |
|
18 | 18 | # source. |
|
19 | 19 | from __future__ import absolute_import |
|
20 | 20 | |
|
21 | 21 | import os |
|
22 | 22 | import re |
|
23 | 23 | import time |
|
24 | 24 | |
|
25 | 25 | from mercurial.i18n import _ |
|
26 | 26 | from mercurial.pycompat import open |
|
27 | 27 | from mercurial.node import ( |
|
28 | 28 | bin, |
|
29 | 29 | hex, |
|
30 | nullhex, | |
|
31 | nullid, | |
|
30 | sha1nodeconstants, | |
|
32 | 31 | ) |
|
33 | 32 | from mercurial import ( |
|
34 | 33 | bookmarks, |
|
35 | 34 | context, |
|
36 | 35 | error, |
|
37 | 36 | exchange, |
|
38 | 37 | hg, |
|
39 | 38 | lock as lockmod, |
|
40 | 39 | merge as mergemod, |
|
41 | 40 | phases, |
|
42 | 41 | pycompat, |
|
43 | 42 | scmutil, |
|
44 | 43 | util, |
|
45 | 44 | ) |
|
46 | 45 | from mercurial.utils import dateutil |
|
47 | 46 | |
|
48 | 47 | stringio = util.stringio |
|
49 | 48 | |
|
50 | 49 | from . import common |
|
51 | 50 | |
|
52 | 51 | mapfile = common.mapfile |
|
53 | 52 | NoRepo = common.NoRepo |
|
54 | 53 | |
|
55 | 54 | sha1re = re.compile(br'\b[0-9a-f]{12,40}\b') |
|
56 | 55 | |
|
57 | 56 | |
|
58 | 57 | class mercurial_sink(common.converter_sink): |
|
59 | 58 | def __init__(self, ui, repotype, path): |
|
60 | 59 | common.converter_sink.__init__(self, ui, repotype, path) |
|
61 | 60 | self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames') |
|
62 | 61 | self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches') |
|
63 | 62 | self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch') |
|
64 | 63 | self.lastbranch = None |
|
65 | 64 | if os.path.isdir(path) and len(os.listdir(path)) > 0: |
|
66 | 65 | try: |
|
67 | 66 | self.repo = hg.repository(self.ui, path) |
|
68 | 67 | if not self.repo.local(): |
|
69 | 68 | raise NoRepo( |
|
70 | 69 | _(b'%s is not a local Mercurial repository') % path |
|
71 | 70 | ) |
|
72 | 71 | except error.RepoError as err: |
|
73 | 72 | ui.traceback() |
|
74 | 73 | raise NoRepo(err.args[0]) |
|
75 | 74 | else: |
|
76 | 75 | try: |
|
77 | 76 | ui.status(_(b'initializing destination %s repository\n') % path) |
|
78 | 77 | self.repo = hg.repository(self.ui, path, create=True) |
|
79 | 78 | if not self.repo.local(): |
|
80 | 79 | raise NoRepo( |
|
81 | 80 | _(b'%s is not a local Mercurial repository') % path |
|
82 | 81 | ) |
|
83 | 82 | self.created.append(path) |
|
84 | 83 | except error.RepoError: |
|
85 | 84 | ui.traceback() |
|
86 | 85 | raise NoRepo( |
|
87 | 86 | _(b"could not create hg repository %s as sink") % path |
|
88 | 87 | ) |
|
89 | 88 | self.lock = None |
|
90 | 89 | self.wlock = None |
|
91 | 90 | self.filemapmode = False |
|
92 | 91 | self.subrevmaps = {} |
|
93 | 92 | |
|
94 | 93 | def before(self): |
|
95 | 94 | self.ui.debug(b'run hg sink pre-conversion action\n') |
|
96 | 95 | self.wlock = self.repo.wlock() |
|
97 | 96 | self.lock = self.repo.lock() |
|
98 | 97 | |
|
99 | 98 | def after(self): |
|
100 | 99 | self.ui.debug(b'run hg sink post-conversion action\n') |
|
101 | 100 | if self.lock: |
|
102 | 101 | self.lock.release() |
|
103 | 102 | if self.wlock: |
|
104 | 103 | self.wlock.release() |
|
105 | 104 | |
|
106 | 105 | def revmapfile(self): |
|
107 | 106 | return self.repo.vfs.join(b"shamap") |
|
108 | 107 | |
|
109 | 108 | def authorfile(self): |
|
110 | 109 | return self.repo.vfs.join(b"authormap") |
|
111 | 110 | |
|
112 | 111 | def setbranch(self, branch, pbranches): |
|
113 | 112 | if not self.clonebranches: |
|
114 | 113 | return |
|
115 | 114 | |
|
116 | 115 | setbranch = branch != self.lastbranch |
|
117 | 116 | self.lastbranch = branch |
|
118 | 117 | if not branch: |
|
119 | 118 | branch = b'default' |
|
120 | 119 | pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches] |
|
121 | 120 | |
|
122 | 121 | branchpath = os.path.join(self.path, branch) |
|
123 | 122 | if setbranch: |
|
124 | 123 | self.after() |
|
125 | 124 | try: |
|
126 | 125 | self.repo = hg.repository(self.ui, branchpath) |
|
127 | 126 | except Exception: |
|
128 | 127 | self.repo = hg.repository(self.ui, branchpath, create=True) |
|
129 | 128 | self.before() |
|
130 | 129 | |
|
131 | 130 | # pbranches may bring revisions from other branches (merge parents) |
|
132 | 131 | # Make sure we have them, or pull them. |
|
133 | 132 | missings = {} |
|
134 | 133 | for b in pbranches: |
|
135 | 134 | try: |
|
136 | 135 | self.repo.lookup(b[0]) |
|
137 | 136 | except Exception: |
|
138 | 137 | missings.setdefault(b[1], []).append(b[0]) |
|
139 | 138 | |
|
140 | 139 | if missings: |
|
141 | 140 | self.after() |
|
142 | 141 | for pbranch, heads in sorted(pycompat.iteritems(missings)): |
|
143 | 142 | pbranchpath = os.path.join(self.path, pbranch) |
|
144 | 143 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
145 | 144 | self.ui.note( |
|
146 | 145 | _(b'pulling from %s into %s\n') % (pbranch, branch) |
|
147 | 146 | ) |
|
148 | 147 | exchange.pull( |
|
149 | 148 | self.repo, prepo, [prepo.lookup(h) for h in heads] |
|
150 | 149 | ) |
|
151 | 150 | self.before() |
|
152 | 151 | |
|
153 | 152 | def _rewritetags(self, source, revmap, data): |
|
154 | 153 | fp = stringio() |
|
155 | 154 | for line in data.splitlines(): |
|
156 | 155 | s = line.split(b' ', 1) |
|
157 | 156 | if len(s) != 2: |
|
158 | 157 | self.ui.warn(_(b'invalid tag entry: "%s"\n') % line) |
|
159 | 158 | fp.write(b'%s\n' % line) # Bogus, but keep for hash stability |
|
160 | 159 | continue |
|
161 | 160 | revid = revmap.get(source.lookuprev(s[0])) |
|
162 | 161 | if not revid: |
|
163 | if s[0] == nullhex: | |
|
162 | if s[0] == sha1nodeconstants.nullhex: | |
|
164 | 163 | revid = s[0] |
|
165 | 164 | else: |
|
166 | 165 | # missing, but keep for hash stability |
|
167 | 166 | self.ui.warn(_(b'missing tag entry: "%s"\n') % line) |
|
168 | 167 | fp.write(b'%s\n' % line) |
|
169 | 168 | continue |
|
170 | 169 | fp.write(b'%s %s\n' % (revid, s[1])) |
|
171 | 170 | return fp.getvalue() |
|
172 | 171 | |
|
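
Each ``.hgtags`` line is ``<40-hex node> <tagname>``. Conversion has to push the node through the source-to-destination ``revmap``, or the tag would point at a changeset hash that only ever existed in the source repository; unparseable or unmapped lines are written back verbatim so the converted file's hash stays stable. A schematic of the per-line rewrite (``lookuprev`` and ``revmap`` as used above)::

    def rewrite_tagline(line, lookuprev, revmap):
        node, tag = line.split(b' ', 1)
        newnode = revmap.get(lookuprev(node))  # source node -> converted node
        # None means "unmapped": the caller keeps the original line
        return b'%s %s' % (newnode, tag) if newnode else None
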
173 | 172 | def _rewritesubstate(self, source, data): |
|
174 | 173 | fp = stringio() |
|
175 | 174 | for line in data.splitlines(): |
|
176 | 175 | s = line.split(b' ', 1) |
|
177 | 176 | if len(s) != 2: |
|
178 | 177 | continue |
|
179 | 178 | |
|
180 | 179 | revid = s[0] |
|
181 | 180 | subpath = s[1] |
|
182 | if revid != nullhex: | |
|
181 | if revid != sha1nodeconstants.nullhex: | |
|
183 | 182 | revmap = self.subrevmaps.get(subpath) |
|
184 | 183 | if revmap is None: |
|
185 | 184 | revmap = mapfile( |
|
186 | 185 | self.ui, self.repo.wjoin(subpath, b'.hg/shamap') |
|
187 | 186 | ) |
|
188 | 187 | self.subrevmaps[subpath] = revmap |
|
189 | 188 | |
|
190 | 189 | # It is reasonable that one or more of the subrepos don't |
|
191 | 190 | # need to be converted, in which case they can be cloned |
|
192 | 191 | # into place instead of converted. Therefore, only warn |
|
193 | 192 | # once. |
|
194 | 193 | msg = _(b'no ".hgsubstate" updates will be made for "%s"\n') |
|
195 | 194 | if len(revmap) == 0: |
|
196 | 195 | sub = self.repo.wvfs.reljoin(subpath, b'.hg') |
|
197 | 196 | |
|
198 | 197 | if self.repo.wvfs.exists(sub): |
|
199 | 198 | self.ui.warn(msg % subpath) |
|
200 | 199 | |
|
201 | 200 | newid = revmap.get(revid) |
|
202 | 201 | if not newid: |
|
203 | 202 | if len(revmap) > 0: |
|
204 | 203 | self.ui.warn( |
|
205 | 204 | _(b"%s is missing from %s/.hg/shamap\n") |
|
206 | 205 | % (revid, subpath) |
|
207 | 206 | ) |
|
208 | 207 | else: |
|
209 | 208 | revid = newid |
|
210 | 209 | |
|
211 | 210 | fp.write(b'%s %s\n' % (revid, subpath)) |
|
212 | 211 | |
|
213 | 212 | return fp.getvalue() |
|
214 | 213 | |
|
215 | 214 | def _calculatemergedfiles(self, source, p1ctx, p2ctx): |
|
216 | 215 | """Calculates the files from p2 that we need to pull in when merging p1 |
|
217 | 216 | and p2, given that the merge is coming from the given source. |
|
218 | 217 | |
|
219 | 218 | This prevents us from losing files that only exist in the target p2 and |
|
220 | 219 | that don't come from the source repo (like if you're merging multiple |
|
221 | 220 | repositories together). |
|
222 | 221 | """ |
|
223 | 222 | anc = [p1ctx.ancestor(p2ctx)] |
|
224 | 223 | # Calculate what files are coming from p2 |
|
225 | 224 | # TODO: mresult.commitinfo might be able to get that info |
|
226 | 225 | mresult = mergemod.calculateupdates( |
|
227 | 226 | self.repo, |
|
228 | 227 | p1ctx, |
|
229 | 228 | p2ctx, |
|
230 | 229 | anc, |
|
231 | 230 | branchmerge=True, |
|
232 | 231 | force=True, |
|
233 | 232 | acceptremote=False, |
|
234 | 233 | followcopies=False, |
|
235 | 234 | ) |
|
236 | 235 | |
|
237 | 236 | for file, (action, info, msg) in mresult.filemap(): |
|
238 | 237 | if source.targetfilebelongstosource(file): |
|
239 | 238 | # If the file belongs to the source repo, ignore the p2 |
|
240 | 239 | # since it will be covered by the existing fileset. |
|
241 | 240 | continue |
|
242 | 241 | |
|
243 | 242 | # If the file requires actual merging, abort. We don't have enough |
|
244 | 243 | # context to resolve merges correctly. |
|
245 | 244 | if action in [b'm', b'dm', b'cd', b'dc']: |
|
246 | 245 | raise error.Abort( |
|
247 | 246 | _( |
|
248 | 247 | b"unable to convert merge commit " |
|
249 | 248 | b"since target parents do not merge cleanly (file " |
|
250 | 249 | b"%s, parents %s and %s)" |
|
251 | 250 | ) |
|
252 | 251 | % (file, p1ctx, p2ctx) |
|
253 | 252 | ) |
|
254 | 253 | elif action == b'k': |
|
255 | 254 | # 'keep' means nothing changed from p1 |
|
256 | 255 | continue |
|
257 | 256 | else: |
|
258 | 257 | # Any other change means we want to take the p2 version |
|
259 | 258 | yield file |
|
260 | 259 | |
|
261 | 260 | def putcommit( |
|
262 | 261 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
263 | 262 | ): |
|
264 | 263 | files = dict(files) |
|
265 | 264 | |
|
266 | 265 | def getfilectx(repo, memctx, f): |
|
267 | 266 | if p2ctx and f in p2files and f not in copies: |
|
268 | 267 | self.ui.debug(b'reusing %s from p2\n' % f) |
|
269 | 268 | try: |
|
270 | 269 | return p2ctx[f] |
|
271 | 270 | except error.ManifestLookupError: |
|
272 | 271 | # If the file doesn't exist in p2, then we're syncing a |
|
273 | 272 | # delete, so just return None. |
|
274 | 273 | return None |
|
275 | 274 | try: |
|
276 | 275 | v = files[f] |
|
277 | 276 | except KeyError: |
|
278 | 277 | return None |
|
279 | 278 | data, mode = source.getfile(f, v) |
|
280 | 279 | if data is None: |
|
281 | 280 | return None |
|
282 | 281 | if f == b'.hgtags': |
|
283 | 282 | data = self._rewritetags(source, revmap, data) |
|
284 | 283 | if f == b'.hgsubstate': |
|
285 | 284 | data = self._rewritesubstate(source, data) |
|
286 | 285 | return context.memfilectx( |
|
287 | 286 | self.repo, |
|
288 | 287 | memctx, |
|
289 | 288 | f, |
|
290 | 289 | data, |
|
291 | 290 | b'l' in mode, |
|
292 | 291 | b'x' in mode, |
|
293 | 292 | copies.get(f), |
|
294 | 293 | ) |
|
295 | 294 | |
|
296 | 295 | pl = [] |
|
297 | 296 | for p in parents: |
|
298 | 297 | if p not in pl: |
|
299 | 298 | pl.append(p) |
|
300 | 299 | parents = pl |
|
301 | 300 | nparents = len(parents) |
|
302 | 301 | if self.filemapmode and nparents == 1: |
|
303 | 302 | m1node = self.repo.changelog.read(bin(parents[0]))[0] |
|
304 | 303 | parent = parents[0] |
|
305 | 304 | |
|
306 | 305 | if len(parents) < 2: |
|
307 | parents.append(nullid) | |
|
306 | parents.append(self.repo.nullid) | |
|
308 | 307 | if len(parents) < 2: |
|
309 | parents.append(nullid) | |
|
308 | parents.append(self.repo.nullid) | |
|
310 | 309 | p2 = parents.pop(0) |
|
311 | 310 | |
|
312 | 311 | text = commit.desc |
|
313 | 312 | |
|
314 | 313 | sha1s = re.findall(sha1re, text) |
|
315 | 314 | for sha1 in sha1s: |
|
316 | 315 | oldrev = source.lookuprev(sha1) |
|
317 | 316 | newrev = revmap.get(oldrev) |
|
318 | 317 | if newrev is not None: |
|
319 | 318 | text = text.replace(sha1, newrev[: len(sha1)]) |
|
320 | 319 | |
|
321 | 320 | extra = commit.extra.copy() |
|
322 | 321 | |
|
323 | 322 | sourcename = self.repo.ui.config(b'convert', b'hg.sourcename') |
|
324 | 323 | if sourcename: |
|
325 | 324 | extra[b'convert_source'] = sourcename |
|
326 | 325 | |
|
327 | 326 | for label in ( |
|
328 | 327 | b'source', |
|
329 | 328 | b'transplant_source', |
|
330 | 329 | b'rebase_source', |
|
331 | 330 | b'intermediate-source', |
|
332 | 331 | ): |
|
333 | 332 | node = extra.get(label) |
|
334 | 333 | |
|
335 | 334 | if node is None: |
|
336 | 335 | continue |
|
337 | 336 | |
|
338 | 337 | # Only transplant stores its reference in binary |
|
339 | 338 | if label == b'transplant_source': |
|
340 | 339 | node = hex(node) |
|
341 | 340 | |
|
342 | 341 | newrev = revmap.get(node) |
|
343 | 342 | if newrev is not None: |
|
344 | 343 | if label == b'transplant_source': |
|
345 | 344 | newrev = bin(newrev) |
|
346 | 345 | |
|
347 | 346 | extra[label] = newrev |
|
348 | 347 | |
|
349 | 348 | if self.branchnames and commit.branch: |
|
350 | 349 | extra[b'branch'] = commit.branch |
|
351 | 350 | if commit.rev and commit.saverev: |
|
352 | 351 | extra[b'convert_revision'] = commit.rev |
|
353 | 352 | |
|
354 | 353 | while parents: |
|
355 | 354 | p1 = p2 |
|
356 | 355 | p2 = parents.pop(0) |
|
357 | 356 | p1ctx = self.repo[p1] |
|
358 | 357 | p2ctx = None |
|
359 | if p2 != nullid: | |
|
358 | if p2 != self.repo.nullid: | |
|
360 | 359 | p2ctx = self.repo[p2] |
|
361 | 360 | fileset = set(files) |
|
362 | 361 | if full: |
|
363 | 362 | fileset.update(self.repo[p1]) |
|
364 | 363 | fileset.update(self.repo[p2]) |
|
365 | 364 | |
|
366 | 365 | if p2ctx: |
|
367 | 366 | p2files = set(cleanp2) |
|
368 | 367 | for file in self._calculatemergedfiles(source, p1ctx, p2ctx): |
|
369 | 368 | p2files.add(file) |
|
370 | 369 | fileset.add(file) |
|
371 | 370 | |
|
372 | 371 | ctx = context.memctx( |
|
373 | 372 | self.repo, |
|
374 | 373 | (p1, p2), |
|
375 | 374 | text, |
|
376 | 375 | fileset, |
|
377 | 376 | getfilectx, |
|
378 | 377 | commit.author, |
|
379 | 378 | commit.date, |
|
380 | 379 | extra, |
|
381 | 380 | ) |
|
382 | 381 | |
|
383 | 382 | # We won't know if the conversion changes the node until after the |
|
384 | 383 | # commit, so copy the source's phase for now. |
|
385 | 384 | self.repo.ui.setconfig( |
|
386 | 385 | b'phases', |
|
387 | 386 | b'new-commit', |
|
388 | 387 | phases.phasenames[commit.phase], |
|
389 | 388 | b'convert', |
|
390 | 389 | ) |
|
391 | 390 | |
|
392 | 391 | with self.repo.transaction(b"convert") as tr: |
|
393 | 392 | if self.repo.ui.config(b'convert', b'hg.preserve-hash'): |
|
394 | 393 | origctx = commit.ctx |
|
395 | 394 | else: |
|
396 | 395 | origctx = None |
|
397 | 396 | node = hex(self.repo.commitctx(ctx, origctx=origctx)) |
|
398 | 397 | |
|
399 | 398 | # If the node value has changed, but the phase is lower than |
|
400 | 399 | # draft, set it back to draft since it hasn't been exposed |
|
401 | 400 | # anywhere. |
|
402 | 401 | if commit.rev != node: |
|
403 | 402 | ctx = self.repo[node] |
|
404 | 403 | if ctx.phase() < phases.draft: |
|
405 | 404 | phases.registernew( |
|
406 | 405 | self.repo, tr, phases.draft, [ctx.rev()] |
|
407 | 406 | ) |
|
408 | 407 | |
|
409 | 408 | text = b"(octopus merge fixup)\n" |
|
410 | 409 | p2 = node |
|
411 | 410 | |
|
412 | 411 | if self.filemapmode and nparents == 1: |
|
413 | 412 | man = self.repo.manifestlog.getstorage(b'') |
|
414 | 413 | mnode = self.repo.changelog.read(bin(p2))[0] |
|
415 | 414 | closed = b'close' in commit.extra |
|
416 | 415 | if not closed and not man.cmp(m1node, man.revision(mnode)): |
|
417 | 416 | self.ui.status(_(b"filtering out empty revision\n")) |
|
418 | 417 | self.repo.rollback(force=True) |
|
419 | 418 | return parent |
|
420 | 419 | return p2 |
|
421 | 420 | |
|
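
The ``while parents`` loop above linearizes octopus merges: a Mercurial changeset has at most two parents, so a source commit with N parents is committed as a chain of N-1 two-parent merges, each extra step labelled ``(octopus merge fixup)``. A schematic of the reduction, with ``merge2`` standing in for the real ``commitctx`` call (callers pad ``parents`` to at least two entries with the null node)::

    def reduce_octopus(parents, merge2):
        p2 = parents.pop(0)
        while parents:
            p1, p2 = p2, parents.pop(0)
            p2 = merge2(p1, p2)  # the result becomes p1 of the next step
        return p2
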
422 | 421 | def puttags(self, tags): |
|
423 | 422 | tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True) |
|
424 | tagparent = tagparent or nullid | |
|
423 | tagparent = tagparent or self.repo.nullid | |
|
425 | 424 | |
|
426 | 425 | oldlines = set() |
|
427 | 426 | for branch, heads in pycompat.iteritems(self.repo.branchmap()): |
|
428 | 427 | for h in heads: |
|
429 | 428 | if b'.hgtags' in self.repo[h]: |
|
430 | 429 | oldlines.update( |
|
431 | 430 | set(self.repo[h][b'.hgtags'].data().splitlines(True)) |
|
432 | 431 | ) |
|
433 | 432 | oldlines = sorted(list(oldlines)) |
|
434 | 433 | |
|
435 | 434 | newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags]) |
|
436 | 435 | if newlines == oldlines: |
|
437 | 436 | return None, None |
|
438 | 437 | |
|
439 | 438 | # if the old and new tags match, then there is nothing to update |
|
440 | 439 | oldtags = set() |
|
441 | 440 | newtags = set() |
|
442 | 441 | for line in oldlines: |
|
443 | 442 | s = line.strip().split(b' ', 1) |
|
444 | 443 | if len(s) != 2: |
|
445 | 444 | continue |
|
446 | 445 | oldtags.add(s[1]) |
|
447 | 446 | for line in newlines: |
|
448 | 447 | s = line.strip().split(b' ', 1) |
|
449 | 448 | if len(s) != 2: |
|
450 | 449 | continue |
|
451 | 450 | if s[1] not in oldtags: |
|
452 | 451 | newtags.add(s[1].strip()) |
|
453 | 452 | |
|
454 | 453 | if not newtags: |
|
455 | 454 | return None, None |
|
456 | 455 | |
|
457 | 456 | data = b"".join(newlines) |
|
458 | 457 | |
|
459 | 458 | def getfilectx(repo, memctx, f): |
|
460 | 459 | return context.memfilectx(repo, memctx, f, data, False, False, None) |
|
461 | 460 | |
|
462 | 461 | self.ui.status(_(b"updating tags\n")) |
|
463 | 462 | date = b"%d 0" % int(time.mktime(time.gmtime())) |
|
464 | 463 | extra = {b'branch': self.tagsbranch} |
|
465 | 464 | ctx = context.memctx( |
|
466 | 465 | self.repo, |
|
467 | 466 | (tagparent, None), |
|
468 | 467 | b"update tags", |
|
469 | 468 | [b".hgtags"], |
|
470 | 469 | getfilectx, |
|
471 | 470 | b"convert-repo", |
|
472 | 471 | date, |
|
473 | 472 | extra, |
|
474 | 473 | ) |
|
475 | 474 | node = self.repo.commitctx(ctx) |
|
476 | 475 | return hex(node), hex(tagparent) |
|
477 | 476 | |
|
478 | 477 | def setfilemapmode(self, active): |
|
479 | 478 | self.filemapmode = active |
|
480 | 479 | |
|
481 | 480 | def putbookmarks(self, updatedbookmark): |
|
482 | 481 | if not len(updatedbookmark): |
|
483 | 482 | return |
|
484 | 483 | wlock = lock = tr = None |
|
485 | 484 | try: |
|
486 | 485 | wlock = self.repo.wlock() |
|
487 | 486 | lock = self.repo.lock() |
|
488 | 487 | tr = self.repo.transaction(b'bookmark') |
|
489 | 488 | self.ui.status(_(b"updating bookmarks\n")) |
|
490 | 489 | destmarks = self.repo._bookmarks |
|
491 | 490 | changes = [ |
|
492 | 491 | (bookmark, bin(updatedbookmark[bookmark])) |
|
493 | 492 | for bookmark in updatedbookmark |
|
494 | 493 | ] |
|
495 | 494 | destmarks.applychanges(self.repo, tr, changes) |
|
496 | 495 | tr.close() |
|
497 | 496 | finally: |
|
498 | 497 | lockmod.release(lock, wlock, tr) |
|
499 | 498 | |
|
500 | 499 | def hascommitfrommap(self, rev): |
|
501 | 500 | # the exact semantics of clonebranches is unclear so we can't say no |
|
502 | 501 | return rev in self.repo or self.clonebranches |
|
503 | 502 | |
|
504 | 503 | def hascommitforsplicemap(self, rev): |
|
505 | 504 | if rev not in self.repo and self.clonebranches: |
|
506 | 505 | raise error.Abort( |
|
507 | 506 | _( |
|
508 | 507 | b'revision %s not found in destination ' |
|
509 | 508 | b'repository (lookups with clonebranches=true ' |
|
510 | 509 | b'are not implemented)' |
|
511 | 510 | ) |
|
512 | 511 | % rev |
|
513 | 512 | ) |
|
514 | 513 | return rev in self.repo |
|
515 | 514 | |
|
516 | 515 | |
|
517 | 516 | class mercurial_source(common.converter_source): |
|
518 | 517 | def __init__(self, ui, repotype, path, revs=None): |
|
519 | 518 | common.converter_source.__init__(self, ui, repotype, path, revs) |
|
520 | 519 | self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors') |
|
521 | 520 | self.ignored = set() |
|
522 | 521 | self.saverev = ui.configbool(b'convert', b'hg.saverev') |
|
523 | 522 | try: |
|
524 | 523 | self.repo = hg.repository(self.ui, path) |
|
525 | 524 | # try to provoke an exception if this isn't really a hg |
|
526 | 525 | # repo, but some other bogus compatible-looking url |
|
527 | 526 | if not self.repo.local(): |
|
528 | 527 | raise error.RepoError |
|
529 | 528 | except error.RepoError: |
|
530 | 529 | ui.traceback() |
|
531 | 530 | raise NoRepo(_(b"%s is not a local Mercurial repository") % path) |
|
532 | 531 | self.lastrev = None |
|
533 | 532 | self.lastctx = None |
|
534 | 533 | self._changescache = None, None |
|
535 | 534 | self.convertfp = None |
|
536 | 535 | # Restrict converted revisions to startrev descendants |
|
537 | 536 | startnode = ui.config(b'convert', b'hg.startrev') |
|
538 | 537 | hgrevs = ui.config(b'convert', b'hg.revs') |
|
539 | 538 | if hgrevs is None: |
|
540 | 539 | if startnode is not None: |
|
541 | 540 | try: |
|
542 | 541 | startnode = self.repo.lookup(startnode) |
|
543 | 542 | except error.RepoError: |
|
544 | 543 | raise error.Abort( |
|
545 | 544 | _(b'%s is not a valid start revision') % startnode |
|
546 | 545 | ) |
|
547 | 546 | startrev = self.repo.changelog.rev(startnode) |
|
548 | 547 | children = {startnode: 1} |
|
549 | 548 | for r in self.repo.changelog.descendants([startrev]): |
|
550 | 549 | children[self.repo.changelog.node(r)] = 1 |
|
551 | 550 | self.keep = children.__contains__ |
|
552 | 551 | else: |
|
553 | 552 | self.keep = util.always |
|
554 | 553 | if revs: |
|
555 | 554 | self._heads = [self.repo.lookup(r) for r in revs] |
|
556 | 555 | else: |
|
557 | 556 | self._heads = self.repo.heads() |
|
558 | 557 | else: |
|
559 | 558 | if revs or startnode is not None: |
|
560 | 559 | raise error.Abort( |
|
561 | 560 | _( |
|
562 | 561 | b'hg.revs cannot be combined with ' |
|
563 | 562 | b'hg.startrev or --rev' |
|
564 | 563 | ) |
|
565 | 564 | ) |
|
566 | 565 | nodes = set() |
|
567 | 566 | parents = set() |
|
568 | 567 | for r in scmutil.revrange(self.repo, [hgrevs]): |
|
569 | 568 | ctx = self.repo[r] |
|
570 | 569 | nodes.add(ctx.node()) |
|
571 | 570 | parents.update(p.node() for p in ctx.parents()) |
|
572 | 571 | self.keep = nodes.__contains__ |
|
573 | 572 | self._heads = nodes - parents |
|
574 | 573 | |
|
575 | 574 | def _changectx(self, rev): |
|
576 | 575 | if self.lastrev != rev: |
|
577 | 576 | self.lastctx = self.repo[rev] |
|
578 | 577 | self.lastrev = rev |
|
579 | 578 | return self.lastctx |
|
580 | 579 | |
|
581 | 580 | def _parents(self, ctx): |
|
582 | 581 | return [p for p in ctx.parents() if p and self.keep(p.node())] |
|
583 | 582 | |
|
584 | 583 | def getheads(self): |
|
585 | 584 | return [hex(h) for h in self._heads if self.keep(h)] |
|
586 | 585 | |
|
587 | 586 | def getfile(self, name, rev): |
|
588 | 587 | try: |
|
589 | 588 | fctx = self._changectx(rev)[name] |
|
590 | 589 | return fctx.data(), fctx.flags() |
|
591 | 590 | except error.LookupError: |
|
592 | 591 | return None, None |
|
593 | 592 | |
|
594 | 593 | def _changedfiles(self, ctx1, ctx2): |
|
595 | 594 | ma, r = [], [] |
|
596 | 595 | maappend = ma.append |
|
597 | 596 | rappend = r.append |
|
598 | 597 | d = ctx1.manifest().diff(ctx2.manifest()) |
|
599 | 598 | for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d): |
|
600 | 599 | if node2 is None: |
|
601 | 600 | rappend(f) |
|
602 | 601 | else: |
|
603 | 602 | maappend(f) |
|
604 | 603 | return ma, r |
|
605 | 604 | |
|
606 | 605 | def getchanges(self, rev, full): |
|
607 | 606 | ctx = self._changectx(rev) |
|
608 | 607 | parents = self._parents(ctx) |
|
609 | 608 | if full or not parents: |
|
610 | 609 | files = copyfiles = ctx.manifest() |
|
611 | 610 | if parents: |
|
612 | 611 | if self._changescache[0] == rev: |
|
613 | 612 | ma, r = self._changescache[1] |
|
614 | 613 | else: |
|
615 | 614 | ma, r = self._changedfiles(parents[0], ctx) |
|
616 | 615 | if not full: |
|
617 | 616 | files = ma + r |
|
618 | 617 | copyfiles = ma |
|
619 | 618 | # _getcopies() is also run for roots and before filtering so missing |
|
620 | 619 | # revlogs are detected early |
|
621 | 620 | copies = self._getcopies(ctx, parents, copyfiles) |
|
622 | 621 | cleanp2 = set() |
|
623 | 622 | if len(parents) == 2: |
|
624 | 623 | d = parents[1].manifest().diff(ctx.manifest(), clean=True) |
|
625 | 624 | for f, value in pycompat.iteritems(d): |
|
626 | 625 | if value is None: |
|
627 | 626 | cleanp2.add(f) |
|
628 | 627 | changes = [(f, rev) for f in files if f not in self.ignored] |
|
629 | 628 | changes.sort() |
|
630 | 629 | return changes, copies, cleanp2 |
|
631 | 630 | |
|
632 | 631 | def _getcopies(self, ctx, parents, files): |
|
633 | 632 | copies = {} |
|
634 | 633 | for name in files: |
|
635 | 634 | if name in self.ignored: |
|
636 | 635 | continue |
|
637 | 636 | try: |
|
638 | 637 | copysource = ctx.filectx(name).copysource() |
|
639 | 638 | if copysource in self.ignored: |
|
640 | 639 | continue |
|
641 | 640 | # Ignore copy sources not in parent revisions |
|
642 | 641 | if not any(copysource in p for p in parents): |
|
643 | 642 | continue |
|
644 | 643 | copies[name] = copysource |
|
645 | 644 | except TypeError: |
|
646 | 645 | pass |
|
647 | 646 | except error.LookupError as e: |
|
648 | 647 | if not self.ignoreerrors: |
|
649 | 648 | raise |
|
650 | 649 | self.ignored.add(name) |
|
651 | 650 | self.ui.warn(_(b'ignoring: %s\n') % e) |
|
652 | 651 | return copies |
|
653 | 652 | |
|
654 | 653 | def getcommit(self, rev): |
|
655 | 654 | ctx = self._changectx(rev) |
|
656 | 655 | _parents = self._parents(ctx) |
|
657 | 656 | parents = [p.hex() for p in _parents] |
|
658 | 657 | optparents = [p.hex() for p in ctx.parents() if p and p not in _parents] |
|
659 | 658 | crev = rev |
|
660 | 659 | |
|
661 | 660 | return common.commit( |
|
662 | 661 | author=ctx.user(), |
|
663 | 662 | date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'), |
|
664 | 663 | desc=ctx.description(), |
|
665 | 664 | rev=crev, |
|
666 | 665 | parents=parents, |
|
667 | 666 | optparents=optparents, |
|
668 | 667 | branch=ctx.branch(), |
|
669 | 668 | extra=ctx.extra(), |
|
670 | 669 | sortkey=ctx.rev(), |
|
671 | 670 | saverev=self.saverev, |
|
672 | 671 | phase=ctx.phase(), |
|
673 | 672 | ctx=ctx, |
|
674 | 673 | ) |
|
675 | 674 | |
|
676 | 675 | def numcommits(self): |
|
677 | 676 | return len(self.repo) |
|
678 | 677 | |
|
679 | 678 | def gettags(self): |
|
680 | 679 | # This will get written to .hgtags, filter non global tags out. |
|
681 | 680 | tags = [ |
|
682 | 681 | t |
|
683 | 682 | for t in self.repo.tagslist() |
|
684 | 683 | if self.repo.tagtype(t[0]) == b'global' |
|
685 | 684 | ] |
|
686 | 685 | return {name: hex(node) for name, node in tags if self.keep(node)} |
|
687 | 686 | |
|
688 | 687 | def getchangedfiles(self, rev, i): |
|
689 | 688 | ctx = self._changectx(rev) |
|
690 | 689 | parents = self._parents(ctx) |
|
691 | 690 | if not parents and i is None: |
|
692 | 691 | i = 0 |
|
693 | 692 | ma, r = ctx.manifest().keys(), [] |
|
694 | 693 | else: |
|
695 | 694 | i = i or 0 |
|
696 | 695 | ma, r = self._changedfiles(parents[i], ctx) |
|
697 | 696 | ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)] |
|
698 | 697 | |
|
699 | 698 | if i == 0: |
|
700 | 699 | self._changescache = (rev, (ma, r)) |
|
701 | 700 | |
|
702 | 701 | return ma + r |
|
703 | 702 | |
|
704 | 703 | def converted(self, rev, destrev): |
|
705 | 704 | if self.convertfp is None: |
|
706 | 705 | self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab') |
|
707 | 706 | self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev))) |
|
708 | 707 | self.convertfp.flush() |
|
709 | 708 | |
|
710 | 709 | def before(self): |
|
711 | 710 | self.ui.debug(b'run hg source pre-conversion action\n') |
|
712 | 711 | |
|
713 | 712 | def after(self): |
|
714 | 713 | self.ui.debug(b'run hg source post-conversion action\n') |
|
715 | 714 | |
|
716 | 715 | def hasnativeorder(self): |
|
717 | 716 | return True |
|
718 | 717 | |
|
719 | 718 | def hasnativeclose(self): |
|
720 | 719 | return True |
|
721 | 720 | |
|
722 | 721 | def lookuprev(self, rev): |
|
723 | 722 | try: |
|
724 | 723 | return hex(self.repo.lookup(rev)) |
|
725 | 724 | except (error.RepoError, error.LookupError): |
|
726 | 725 | return None |
|
727 | 726 | |
|
728 | 727 | def getbookmarks(self): |
|
729 | 728 | return bookmarks.listbookmarks(self.repo) |
|
730 | 729 | |
|
731 | 730 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
732 | 731 | """ Mercurial, revision string is a 40 byte hex """ |
|
733 | 732 | self.checkhexformat(revstr, mapname) |
@@ -1,341 +1,343 @@
|
1 | 1 | from __future__ import absolute_import |
|
2 | 2 | |
|
3 | 3 | import contextlib |
|
4 | 4 | import errno |
|
5 | 5 | import os |
|
6 | 6 | |
|
7 | from mercurial.node import nullid |
|
7 | from mercurial.node import sha1nodeconstants | |
|
8 | 8 | from mercurial import ( |
|
9 | 9 | error, |
|
10 | 10 | extensions, |
|
11 | 11 | match as matchmod, |
|
12 | 12 | pycompat, |
|
13 | 13 | scmutil, |
|
14 | 14 | util, |
|
15 | 15 | ) |
|
16 | 16 | from mercurial.interfaces import ( |
|
17 | 17 | dirstate as intdirstate, |
|
18 | 18 | util as interfaceutil, |
|
19 | 19 | ) |
|
20 | 20 | |
|
21 | 21 | from . import gitutil |
|
22 | 22 | |
|
23 | 23 | pygit2 = gitutil.get_pygit2() |
|
24 | 24 | |
|
25 | 25 | |
|
26 | 26 | def readpatternfile(orig, filepath, warn, sourceinfo=False): |
|
27 | 27 | if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')): |
|
28 | 28 | return orig(filepath, warn, sourceinfo=False) |
|
29 | 29 | result = [] |
|
30 | 30 | warnings = [] |
|
31 | 31 | with open(filepath, b'rb') as fp: |
|
32 | 32 | for l in fp: |
|
33 | 33 | l = l.strip() |
|
34 | 34 | if not l or l.startswith(b'#'): |
|
35 | 35 | continue |
|
36 | 36 | if l.startswith(b'!'): |
|
37 | 37 | warnings.append(b'unsupported ignore pattern %s' % l) |
|
38 | 38 | continue |
|
39 | 39 | if l.startswith(b'/'): |
|
40 | 40 | result.append(b'rootglob:' + l[1:]) |
|
41 | 41 | else: |
|
42 | 42 | result.append(b'relglob:' + l) |
|
43 | 43 | return result, warnings |
|
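
The translation is intentionally small: a leading slash anchors a gitignore pattern to the repository root, anything else matches at any depth, and negations have no Mercurial equivalent. For example::

    /build/        ->  rootglob:build/
    *.pyc          ->  relglob:*.pyc
    !important.py  ->  skipped, with an "unsupported ignore pattern" warning
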
44 | 44 | |
|
45 | 45 | |
|
46 | 46 | extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile) |
|
47 | 47 | |
|
48 | 48 | |
|
49 | 49 | _STATUS_MAP = {} |
|
50 | 50 | if pygit2: |
|
51 | 51 | _STATUS_MAP = { |
|
52 | 52 | pygit2.GIT_STATUS_CONFLICTED: b'm', |
|
53 | 53 | pygit2.GIT_STATUS_CURRENT: b'n', |
|
54 | 54 | pygit2.GIT_STATUS_IGNORED: b'?', |
|
55 | 55 | pygit2.GIT_STATUS_INDEX_DELETED: b'r', |
|
56 | 56 | pygit2.GIT_STATUS_INDEX_MODIFIED: b'n', |
|
57 | 57 | pygit2.GIT_STATUS_INDEX_NEW: b'a', |
|
58 | 58 | pygit2.GIT_STATUS_INDEX_RENAMED: b'a', |
|
59 | 59 | pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n', |
|
60 | 60 | pygit2.GIT_STATUS_WT_DELETED: b'r', |
|
61 | 61 | pygit2.GIT_STATUS_WT_MODIFIED: b'n', |
|
62 | 62 | pygit2.GIT_STATUS_WT_NEW: b'?', |
|
63 | 63 | pygit2.GIT_STATUS_WT_RENAMED: b'a', |
|
64 | 64 | pygit2.GIT_STATUS_WT_TYPECHANGE: b'n', |
|
65 | 65 | pygit2.GIT_STATUS_WT_UNREADABLE: b'?', |
|
66 | 66 | pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm', |
|
67 | 67 | } |
|
68 | 68 | |
|
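
pygit2 status values are bit flags, so one file can carry several at once. Simple flags map directly, but combinations need their own entries (hence the explicit ``INDEX_MODIFIED | WT_MODIFIED`` key above), and any combination missing from the table surfaces as a ``KeyError`` from ``__getitem__`` below. A quick way to inspect the raw flags on a checkout (a sketch assuming pygit2 is installed and ``.`` is a git repository)::

    import pygit2

    repo = pygit2.Repository('.')
    for path, flags in repo.status().items():
        # flags is a bitmask; exact-match lookup works only because the
        # map enumerates the combinations it expects to see
        print(path, _STATUS_MAP.get(flags, b'?'))
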
69 | 69 | |
|
70 | 70 | @interfaceutil.implementer(intdirstate.idirstate) |
|
71 | 71 | class gitdirstate(object): |
|
72 | 72 | def __init__(self, ui, root, gitrepo): |
|
73 | 73 | self._ui = ui |
|
74 | 74 | self._root = os.path.dirname(root) |
|
75 | 75 | self.git = gitrepo |
|
76 | 76 | self._plchangecallbacks = {} |
|
77 | 77 | |
|
78 | 78 | def p1(self): |
|
79 | 79 | try: |
|
80 | 80 | return self.git.head.peel().id.raw |
|
81 | 81 | except pygit2.GitError: |
|
82 | 82 | # Typically happens when peeling HEAD fails, as in an |
|
83 | 83 | # empty repository. |
|
84 | return nullid | |
|
84 | return sha1nodeconstants.nullid | |
|
85 | 85 | |
|
86 | 86 | def p2(self): |
|
87 | 87 | # TODO: MERGE_HEAD? something like that, right? |
|
88 | return nullid | |
|
88 | return sha1nodeconstants.nullid | |
|
89 | 89 | |
|
90 | def setparents(self, p1, p2=nullid): |
|
91 | assert p2 == nullid, b'TODO merging support' | |
|
90 | def setparents(self, p1, p2=None): | |
|
91 | if p2 is None: | |
|
92 | p2 = sha1nodeconstants.nullid | |
|
93 | assert p2 == sha1nodeconstants.nullid, b'TODO merging support' | |
|
92 | 94 | self.git.head.set_target(gitutil.togitnode(p1)) |
|
93 | 95 | |
|
94 | 96 | @util.propertycache |
|
95 | 97 | def identity(self): |
|
96 | 98 | return util.filestat.frompath( |
|
97 | 99 | os.path.join(self._root, b'.git', b'index') |
|
98 | 100 | ) |
|
99 | 101 | |
|
100 | 102 | def branch(self): |
|
101 | 103 | return b'default' |
|
102 | 104 | |
|
103 | 105 | def parents(self): |
|
104 | 106 | # TODO how on earth do we find p2 if a merge is in flight? |
|
105 | return self.p1(), nullid | |
|
107 | return self.p1(), sha1nodeconstants.nullid | |
|
106 | 108 | |
|
107 | 109 | def __iter__(self): |
|
108 | 110 | return (pycompat.fsencode(f.path) for f in self.git.index) |
|
109 | 111 | |
|
110 | 112 | def items(self): |
|
111 | 113 | for ie in self.git.index: |
|
112 | 114 | yield ie.path, None # value should be a dirstatetuple |
|
113 | 115 | |
|
114 | 116 | # py2,3 compat forward |
|
115 | 117 | iteritems = items |
|
116 | 118 | |
|
117 | 119 | def __getitem__(self, filename): |
|
118 | 120 | try: |
|
119 | 121 | gs = self.git.status_file(filename) |
|
120 | 122 | except KeyError: |
|
121 | 123 | return b'?' |
|
122 | 124 | return _STATUS_MAP[gs] |
|
123 | 125 | |
|
124 | 126 | def __contains__(self, filename): |
|
125 | 127 | try: |
|
126 | 128 | gs = self.git.status_file(filename) |
|
127 | 129 | return _STATUS_MAP[gs] != b'?' |
|
128 | 130 | except KeyError: |
|
129 | 131 | return False |
|
130 | 132 | |
|
131 | 133 | def status(self, match, subrepos, ignored, clean, unknown): |
|
132 | 134 | listclean = clean |
|
133 | 135 | # TODO handling of clean files - can we get that from git.status()? |
|
134 | 136 | modified, added, removed, deleted, unknown, ignored, clean = ( |
|
135 | 137 | [], |
|
136 | 138 | [], |
|
137 | 139 | [], |
|
138 | 140 | [], |
|
139 | 141 | [], |
|
140 | 142 | [], |
|
141 | 143 | [], |
|
142 | 144 | ) |
|
143 | 145 | gstatus = self.git.status() |
|
144 | 146 | for path, status in gstatus.items(): |
|
145 | 147 | path = pycompat.fsencode(path) |
|
146 | 148 | if not match(path): |
|
147 | 149 | continue |
|
148 | 150 | if status == pygit2.GIT_STATUS_IGNORED: |
|
149 | 151 | if path.endswith(b'/'): |
|
150 | 152 | continue |
|
151 | 153 | ignored.append(path) |
|
152 | 154 | elif status in ( |
|
153 | 155 | pygit2.GIT_STATUS_WT_MODIFIED, |
|
154 | 156 | pygit2.GIT_STATUS_INDEX_MODIFIED, |
|
155 | 157 | pygit2.GIT_STATUS_WT_MODIFIED |
|
156 | 158 | | pygit2.GIT_STATUS_INDEX_MODIFIED, |
|
157 | 159 | ): |
|
158 | 160 | modified.append(path) |
|
159 | 161 | elif status == pygit2.GIT_STATUS_INDEX_NEW: |
|
160 | 162 | added.append(path) |
|
161 | 163 | elif status == pygit2.GIT_STATUS_WT_NEW: |
|
162 | 164 | unknown.append(path) |
|
163 | 165 | elif status == pygit2.GIT_STATUS_WT_DELETED: |
|
164 | 166 | deleted.append(path) |
|
165 | 167 | elif status == pygit2.GIT_STATUS_INDEX_DELETED: |
|
166 | 168 | removed.append(path) |
|
167 | 169 | else: |
|
168 | 170 | raise error.Abort( |
|
169 | 171 | b'unhandled case: status for %r is %r' % (path, status) |
|
170 | 172 | ) |
|
171 | 173 | |
|
172 | 174 | if listclean: |
|
173 | 175 | observed = set( |
|
174 | 176 | modified + added + removed + deleted + unknown + ignored |
|
175 | 177 | ) |
|
176 | 178 | index = self.git.index |
|
177 | 179 | index.read() |
|
178 | 180 | for entry in index: |
|
179 | 181 | path = pycompat.fsencode(entry.path) |
|
180 | 182 | if not match(path): |
|
181 | 183 | continue |
|
182 | 184 | if path in observed: |
|
183 | 185 | continue # already in some other set |
|
184 | 186 | if path[-1] == b'/': |
|
185 | 187 | continue # directory |
|
186 | 188 | clean.append(path) |
|
187 | 189 | |
|
188 | 190 | # TODO are we really always sure of status here? |
|
189 | 191 | return ( |
|
190 | 192 | False, |
|
191 | 193 | scmutil.status( |
|
192 | 194 | modified, added, removed, deleted, unknown, ignored, clean |
|
193 | 195 | ), |
|
194 | 196 | ) |
|
195 | 197 | |
|
196 | 198 | def flagfunc(self, buildfallback): |
|
197 | 199 | # TODO we can do better |
|
198 | 200 | return buildfallback() |
|
199 | 201 | |
|
200 | 202 | def getcwd(self): |
|
201 | 203 | # TODO is this a good way to do this? |
|
202 | 204 | return os.path.dirname( |
|
203 | 205 | os.path.dirname(pycompat.fsencode(self.git.path)) |
|
204 | 206 | ) |
|
205 | 207 | |
|
206 | 208 | def normalize(self, path): |
|
207 | 209 | normed = util.normcase(path) |
|
208 | 210 | assert normed == path, b"TODO handling of case folding: %s != %s" % ( |
|
209 | 211 | normed, |
|
210 | 212 | path, |
|
211 | 213 | ) |
|
212 | 214 | return path |
|
213 | 215 | |
|
214 | 216 | @property |
|
215 | 217 | def _checklink(self): |
|
216 | 218 | return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path))) |
|
217 | 219 | |
|
218 | 220 | def copies(self): |
|
219 | 221 | # TODO support copies? |
|
220 | 222 | return {} |
|
221 | 223 | |
|
222 | 224 | # # TODO what the heck is this |
|
223 | 225 | _filecache = set() |
|
224 | 226 | |
|
225 | 227 | def pendingparentchange(self): |
|
226 | 228 | # TODO: we need to implement the context manager bits and |
|
227 | 229 | # correctly stage/revert index edits. |
|
228 | 230 | return False |
|
229 | 231 | |
|
230 | 232 | def write(self, tr): |
|
231 | 233 | # TODO: call parent change callbacks |
|
232 | 234 | |
|
233 | 235 | if tr: |
|
234 | 236 | |
|
235 | 237 | def writeinner(category): |
|
236 | 238 | self.git.index.write() |
|
237 | 239 | |
|
238 | 240 | tr.addpending(b'gitdirstate', writeinner) |
|
239 | 241 | else: |
|
240 | 242 | self.git.index.write() |
|
241 | 243 | |
|
242 | 244 | def pathto(self, f, cwd=None): |
|
243 | 245 | if cwd is None: |
|
244 | 246 | cwd = self.getcwd() |
|
245 | 247 | # TODO core dirstate does something about slashes here |
|
246 | 248 | assert isinstance(f, bytes) |
|
247 | 249 | r = util.pathto(self._root, cwd, f) |
|
248 | 250 | return r |
|
249 | 251 | |
|
250 | 252 | def matches(self, match): |
|
251 | 253 | for x in self.git.index: |
|
252 | 254 | p = pycompat.fsencode(x.path) |
|
253 | 255 | if match(p): |
|
254 | 256 | yield p |
|
255 | 257 | |
|
256 | 258 | def normal(self, f, parentfiledata=None): |
|
257 | 259 | """Mark a file normal and clean.""" |
|
258 | 260 | # TODO: for now we just let libgit2 re-stat the file. We can |
|
259 | 261 | # clearly do better. |
|
260 | 262 | |
|
261 | 263 | def normallookup(self, f): |
|
262 | 264 | """Mark a file normal, but possibly dirty.""" |
|
263 | 265 | # TODO: for now we just let libgit2 re-stat the file. We can |
|
264 | 266 | # clearly do better. |
|
265 | 267 | |
|
266 | 268 | def walk(self, match, subrepos, unknown, ignored, full=True): |
|
267 | 269 | # TODO: we need to use .status() and not iterate the index, |
|
268 | 270 | # because the index doesn't force a re-walk and so `hg add` of |
|
269 | 271 | # a new file without an intervening call to status will |
|
270 | 272 | # silently do nothing. |
|
271 | 273 | r = {} |
|
272 | 274 | cwd = self.getcwd() |
|
273 | 275 | for path, status in self.git.status().items(): |
|
274 | 276 | if path.startswith('.hg/'): |
|
275 | 277 | continue |
|
276 | 278 | path = pycompat.fsencode(path) |
|
277 | 279 | if not match(path): |
|
278 | 280 | continue |
|
279 | 281 | # TODO construct the stat info from the status object? |
|
280 | 282 | try: |
|
281 | 283 | s = os.stat(os.path.join(cwd, path)) |
|
282 | 284 | except OSError as e: |
|
283 | 285 | if e.errno != errno.ENOENT: |
|
284 | 286 | raise |
|
285 | 287 | continue |
|
286 | 288 | r[path] = s |
|
287 | 289 | return r |
|
288 | 290 | |
|
289 | 291 | def savebackup(self, tr, backupname): |
|
290 | 292 | # TODO: figure out a strategy for saving index backups. |
|
291 | 293 | pass |
|
292 | 294 | |
|
293 | 295 | def restorebackup(self, tr, backupname): |
|
294 | 296 | # TODO: figure out a strategy for saving index backups. |
|
295 | 297 | pass |
|
296 | 298 | |
|
297 | 299 | def add(self, f): |
|
298 | 300 | index = self.git.index |
|
299 | 301 | index.read() |
|
300 | 302 | index.add(pycompat.fsdecode(f)) |
|
301 | 303 | index.write() |
|
302 | 304 | |
|
303 | 305 | def drop(self, f): |
|
304 | 306 | index = self.git.index |
|
305 | 307 | index.read() |
|
306 | 308 | fs = pycompat.fsdecode(f) |
|
307 | 309 | if fs in index: |
|
308 | 310 | index.remove(fs) |
|
309 | 311 | index.write() |
|
310 | 312 | |
|
311 | 313 | def remove(self, f): |
|
312 | 314 | index = self.git.index |
|
313 | 315 | index.read() |
|
314 | 316 | index.remove(pycompat.fsdecode(f)) |
|
315 | 317 | index.write() |
|
316 | 318 | |
|
317 | 319 | def copied(self, path): |
|
318 | 320 | # TODO: track copies? |
|
319 | 321 | return None |
|
320 | 322 | |
|
321 | 323 | def prefetch_parents(self): |
|
322 | 324 | # TODO |
|
323 | 325 | pass |
|
324 | 326 | |
|
325 | 327 | @contextlib.contextmanager |
|
326 | 328 | def parentchange(self): |
|
327 | 329 | # TODO: track this maybe? |
|
328 | 330 | yield |
|
329 | 331 | |
|
330 | 332 | def addparentchangecallback(self, category, callback): |
|
331 | 333 | # TODO: should this be added to the dirstate interface? |
|
332 | 334 | self._plchangecallbacks[category] = callback |
|
333 | 335 | |
|
334 | 336 | def clearbackup(self, tr, backupname): |
|
335 | 337 | # TODO |
|
336 | 338 | pass |
|
337 | 339 | |
|
338 | 340 | def setbranch(self, branch): |
|
339 | 341 | raise error.Abort( |
|
340 | 342 | b'git repos do not support branches. try using bookmarks' |
|
341 | 343 | ) |
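
The write() hunk above defers flushing the pygit2 index until the transaction materializes pending changes, rather than writing immediately. A minimal sketch of that deferral pattern, assuming a transaction object with the addpending() hook used above:

    def write_index(git_index, tr=None):
        # With an open transaction, register a pending-change callback so
        # the index hits disk only when the transaction is materialized.
        if tr is not None:
            tr.addpending(b'gitdirstate', lambda category: git_index.write())
        else:
            git_index.write()
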
@@ -1,543 +1,543 b'' | |||
|
1 | 1 | from __future__ import absolute_import |
|
2 | 2 | |
|
3 | 3 | from mercurial.i18n import _ |
|
4 | 4 | |
|
5 | 5 | from mercurial.node import ( |
|
6 | 6 | bin, |
|
7 | 7 | hex, |
|
8 | nullhex, | |
|
9 | nullid, | |
|
10 | 8 | nullrev, |
|
11 | 9 | sha1nodeconstants, |
|
12 | wdirhex, | |
|
13 | 10 | ) |
|
14 | 11 | from mercurial import ( |
|
15 | 12 | ancestor, |
|
16 | 13 | changelog as hgchangelog, |
|
17 | 14 | dagop, |
|
18 | 15 | encoding, |
|
19 | 16 | error, |
|
20 | 17 | manifest, |
|
21 | 18 | pycompat, |
|
22 | 19 | ) |
|
23 | 20 | from mercurial.interfaces import ( |
|
24 | 21 | repository, |
|
25 | 22 | util as interfaceutil, |
|
26 | 23 | ) |
|
27 | 24 | from mercurial.utils import stringutil |
|
28 | 25 | from . import ( |
|
29 | 26 | gitutil, |
|
30 | 27 | index, |
|
31 | 28 | manifest as gitmanifest, |
|
32 | 29 | ) |
|
33 | 30 | |
|
34 | 31 | pygit2 = gitutil.get_pygit2() |
|
35 | 32 | |
|
36 | 33 | |
|
37 | 34 | class baselog(object): # revlog.revlog): |
|
38 | 35 | """Common implementations between changelog and manifestlog.""" |
|
39 | 36 | |
|
40 | 37 | def __init__(self, gr, db): |
|
41 | 38 | self.gitrepo = gr |
|
42 | 39 | self._db = db |
|
43 | 40 | |
|
44 | 41 | def __len__(self): |
|
45 | 42 | return int( |
|
46 | 43 | self._db.execute('SELECT COUNT(*) FROM changelog').fetchone()[0] |
|
47 | 44 | ) |
|
48 | 45 | |
|
49 | 46 | def rev(self, n): |
|
50 | if n == nullid: | |
|
47 | if n == sha1nodeconstants.nullid: | |
|
51 | 48 | return -1 |
|
52 | 49 | t = self._db.execute( |
|
53 | 50 | 'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),) |
|
54 | 51 | ).fetchone() |
|
55 | 52 | if t is None: |
|
56 | 53 | raise error.LookupError(n, b'00changelog.i', _(b'no node %d')) |
|
57 | 54 | return t[0] |
|
58 | 55 | |
|
59 | 56 | def node(self, r): |
|
60 | 57 | if r == nullrev: |
|
61 | return nullid | |
|
58 | return sha1nodeconstants.nullid | |
|
62 | 59 | t = self._db.execute( |
|
63 | 60 | 'SELECT node FROM changelog WHERE rev = ?', (r,) |
|
64 | 61 | ).fetchone() |
|
65 | 62 | if t is None: |
|
66 | 63 | raise error.LookupError(r, b'00changelog.i', _(b'no node')) |
|
67 | 64 | return bin(t[0]) |
|
68 | 65 | |
|
69 | 66 | def hasnode(self, n): |
|
70 | 67 | t = self._db.execute( |
|
71 | 68 | 'SELECT node FROM changelog WHERE node = ?', (n,) |
|
72 | 69 | ).fetchone() |
|
73 | 70 | return t is not None |
|
74 | 71 | |
|
75 | 72 | |
|
76 | 73 | class baselogindex(object): |
|
77 | 74 | def __init__(self, log): |
|
78 | 75 | self._log = log |
|
79 | 76 | |
|
80 | 77 | def has_node(self, n): |
|
81 | 78 | return self._log.rev(n) != -1 |
|
82 | 79 | |
|
83 | 80 | def __len__(self): |
|
84 | 81 | return len(self._log) |
|
85 | 82 | |
|
86 | 83 | def __getitem__(self, idx): |
|
87 | 84 | p1rev, p2rev = self._log.parentrevs(idx) |
|
88 | 85 | # TODO: it's messy that the index leaks so far out of the |
|
89 | 86 | # storage layer that we have to implement things like reading |
|
90 | 87 | # this raw tuple, which exposes revlog internals. |
|
91 | 88 | return ( |
|
92 | 89 | # Pretend offset is just the index, since we don't really care. |
|
93 | 90 | idx, |
|
94 | 91 | # Same with lengths |
|
95 | 92 | idx, # length |
|
96 | 93 | idx, # rawsize |
|
97 | 94 | -1, # delta base |
|
98 | 95 | idx, # linkrev TODO is this right? |
|
99 | 96 | p1rev, |
|
100 | 97 | p2rev, |
|
101 | 98 | self._log.node(idx), |
|
102 | 99 | ) |
|
103 | 100 | |
|
104 | 101 | |
|
105 | 102 | # TODO: an interface for the changelog type? |
|
106 | 103 | class changelog(baselog): |
|
107 | 104 | # TODO: this appears to be an enumerated type, and should probably |
|
108 | 105 | # be part of the public changelog interface |
|
109 | 106 | _copiesstorage = b'extra' |
|
110 | 107 | |
|
111 | 108 | def __contains__(self, rev): |
|
112 | 109 | try: |
|
113 | 110 | self.node(rev) |
|
114 | 111 | return True |
|
115 | 112 | except error.LookupError: |
|
116 | 113 | return False |
|
117 | 114 | |
|
118 | 115 | def __iter__(self): |
|
119 | 116 | return iter(pycompat.xrange(len(self))) |
|
120 | 117 | |
|
121 | 118 | @property |
|
122 | 119 | def filteredrevs(self): |
|
123 | 120 | # TODO: we should probably add a refs/hg/ namespace for hidden |
|
124 | 121 | # heads etc, but that's an idea for later. |
|
125 | 122 | return set() |
|
126 | 123 | |
|
127 | 124 | @property |
|
128 | 125 | def index(self): |
|
129 | 126 | return baselogindex(self) |
|
130 | 127 | |
|
131 | 128 | @property |
|
132 | 129 | def nodemap(self): |
|
133 | 130 | r = { |
|
134 | 131 | bin(v[0]): v[1] |
|
135 | 132 | for v in self._db.execute('SELECT node, rev FROM changelog') |
|
136 | 133 | } |
|
137 | r[nullid] = nullrev | |
|
134 | r[sha1nodeconstants.nullid] = nullrev | |
|
138 | 135 | return r |
|
139 | 136 | |
|
140 | 137 | def tip(self): |
|
141 | 138 | t = self._db.execute( |
|
142 | 139 | 'SELECT node FROM changelog ORDER BY rev DESC LIMIT 1' |
|
143 | 140 | ).fetchone() |
|
144 | 141 | if t: |
|
145 | 142 | return bin(t[0]) |
|
146 | return nullid | |
|
143 | return sha1nodeconstants.nullid | |
|
147 | 144 | |
|
148 | 145 | def revs(self, start=0, stop=None): |
|
149 | 146 | if stop is None: |
|
150 | 147 | stop = self.tip() |
|
151 | 148 | t = self._db.execute( |
|
152 | 149 | 'SELECT rev FROM changelog ' |
|
153 | 150 | 'WHERE rev >= ? AND rev <= ? ' |
|
154 | 151 | 'ORDER BY REV ASC', |
|
155 | 152 | (start, stop), |
|
156 | 153 | ) |
|
157 | 154 | return (int(r[0]) for r in t) |
|
158 | 155 | |
|
159 | 156 | def tiprev(self): |
|
160 | 157 | t = self._db.execute( |
|
161 | 158 | 'SELECT rev FROM changelog ' 'ORDER BY REV DESC ' 'LIMIT 1' |
|
162 | 159 | ) |
|
163 | 160 | return next(t) |
|
164 | 161 | |
|
165 | 162 | def _partialmatch(self, id): |
|
166 | if wdirhex.startswith(id): | |
|
163 | if sha1nodeconstants.wdirhex.startswith(id): | |
|
167 | 164 | raise error.WdirUnsupported |
|
168 | 165 | candidates = [ |
|
169 | 166 | bin(x[0]) |
|
170 | 167 | for x in self._db.execute( |
|
171 | 168 | 'SELECT node FROM changelog WHERE node LIKE ?', (id + b'%',) |
|
172 | 169 | ) |
|
173 | 170 | ] |
|
174 | if nullhex.startswith(id): | |
|
175 | candidates.append(nullid) | |
|
171 | if sha1nodeconstants.nullhex.startswith(id): | |
|
172 | candidates.append(sha1nodeconstants.nullid) | |
|
176 | 173 | if len(candidates) > 1: |
|
177 | 174 | raise error.AmbiguousPrefixLookupError( |
|
178 | 175 | id, b'00changelog.i', _(b'ambiguous identifier') |
|
179 | 176 | ) |
|
180 | 177 | if candidates: |
|
181 | 178 | return candidates[0] |
|
182 | 179 | return None |
|
183 | 180 | |
|
184 | 181 | def flags(self, rev): |
|
185 | 182 | return 0 |
|
186 | 183 | |
|
187 | 184 | def shortest(self, node, minlength=1): |
|
188 | 185 | nodehex = hex(node) |
|
189 | 186 | for attempt in pycompat.xrange(minlength, len(nodehex) + 1): |
|
190 | 187 | candidate = nodehex[:attempt] |
|
191 | 188 | matches = int( |
|
192 | 189 | self._db.execute( |
|
193 | 190 | 'SELECT COUNT(*) FROM changelog WHERE node LIKE ?', |
|
194 | 191 | (pycompat.sysstr(candidate + b'%'),), |
|
195 | 192 | ).fetchone()[0] |
|
196 | 193 | ) |
|
197 | 194 | if matches == 1: |
|
198 | 195 | return candidate |
|
199 | 196 | return nodehex |
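
shortest() above finds the shortest unambiguous hex prefix by counting LIKE matches at each candidate length. A worked sketch of the same idea over an in-memory set (hypothetical helper, not the extension's code):

    def shortest(nodehex, all_hexes, minlength=1):
        for size in range(minlength, len(nodehex) + 1):
            candidate = nodehex[:size]
            matches = sum(1 for h in all_hexes if h.startswith(candidate))
            if matches == 1:
                return candidate  # unique prefix found
        return nodehex

With all_hexes = {'abc123', 'abd456'}, shortest('abc123', all_hexes) returns 'abc': 'a' and 'ab' each match both entries, and 'abc' is the first prefix that matches exactly one.
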
|
200 | 197 | |
|
201 | 198 | def headrevs(self, revs=None): |
|
202 | 199 | realheads = [ |
|
203 | 200 | int(x[0]) |
|
204 | 201 | for x in self._db.execute( |
|
205 | 202 | 'SELECT rev FROM changelog ' |
|
206 | 203 | 'INNER JOIN heads ON changelog.node = heads.node' |
|
207 | 204 | ) |
|
208 | 205 | ] |
|
209 | 206 | if revs: |
|
210 | 207 | return sorted([r for r in revs if r in realheads]) |
|
211 | 208 | return sorted(realheads) |
|
212 | 209 | |
|
213 | 210 | def changelogrevision(self, nodeorrev): |
|
214 | 211 | # Ensure we have a node id |
|
215 | 212 | if isinstance(nodeorrev, int): |
|
216 | 213 | n = self.node(nodeorrev) |
|
217 | 214 | else: |
|
218 | 215 | n = nodeorrev |
|
219 | 216 | # handle looking up nullid |
|
220 | if n == nullid: | |
|
221 | return hgchangelog._changelogrevision(extra={}, manifest=nullid) |
|
217 | if n == sha1nodeconstants.nullid: | |
|
218 | return hgchangelog._changelogrevision( | |
|
219 | extra={}, manifest=sha1nodeconstants.nullid | |
|
220 | ) | |
|
222 | 221 | hn = gitutil.togitnode(n) |
|
223 | 222 | # We've got a real commit! |
|
224 | 223 | files = [ |
|
225 | 224 | r[0] |
|
226 | 225 | for r in self._db.execute( |
|
227 | 226 | 'SELECT filename FROM changedfiles ' |
|
228 | 227 | 'WHERE node = ? and filenode != ?', |
|
229 | 228 | (hn, gitutil.nullgit), |
|
230 | 229 | ) |
|
231 | 230 | ] |
|
232 | 231 | filesremoved = [ |
|
233 | 232 | r[0] |
|
234 | 233 | for r in self._db.execute( |
|
235 | 234 | 'SELECT filename FROM changedfiles ' |
|
236 | 235 | 'WHERE node = ? and filenode = ?', |
|
237 | (hn, nullhex), | |
|
236 | (hn, sha1nodeconstants.nullhex), | |
|
238 | 237 | ) |
|
239 | 238 | ] |
|
240 | 239 | c = self.gitrepo[hn] |
|
241 | 240 | return hgchangelog._changelogrevision( |
|
242 | 241 | manifest=n, # pretend manifest the same as the commit node |
|
243 | 242 | user=b'%s <%s>' |
|
244 | 243 | % (c.author.name.encode('utf8'), c.author.email.encode('utf8')), |
|
245 | 244 | date=(c.author.time, -c.author.offset * 60), |
|
246 | 245 | files=files, |
|
247 | 246 | # TODO filesadded in the index |
|
248 | 247 | filesremoved=filesremoved, |
|
249 | 248 | description=c.message.encode('utf8'), |
|
250 | 249 | # TODO do we want to handle extra? how? |
|
251 | 250 | extra={b'branch': b'default'}, |
|
252 | 251 | ) |
|
253 | 252 | |
|
254 | 253 | def ancestors(self, revs, stoprev=0, inclusive=False): |
|
255 | 254 | revs = list(revs) |
|
256 | 255 | tip = self.rev(self.tip()) |
|
257 | 256 | for r in revs: |
|
258 | 257 | if r > tip: |
|
259 | 258 | raise IndexError(b'Invalid rev %r' % r) |
|
260 | 259 | return ancestor.lazyancestors( |
|
261 | 260 | self.parentrevs, revs, stoprev=stoprev, inclusive=inclusive |
|
262 | 261 | ) |
|
263 | 262 | |
|
264 | 263 | # Cleanup opportunity: this is *identical* to the revlog.py version |
|
265 | 264 | def descendants(self, revs): |
|
266 | 265 | return dagop.descendantrevs(revs, self.revs, self.parentrevs) |
|
267 | 266 | |
|
268 | 267 | def incrementalmissingrevs(self, common=None): |
|
269 | 268 | """Return an object that can be used to incrementally compute the |
|
270 | 269 | revision numbers of the ancestors of arbitrary sets that are not |
|
271 | 270 | ancestors of common. This is an ancestor.incrementalmissingancestors |
|
272 | 271 | object. |
|
273 | 272 | |
|
274 | 273 | 'common' is a list of revision numbers. If common is not supplied, uses |
|
275 | 274 | nullrev. |
|
276 | 275 | """ |
|
277 | 276 | if common is None: |
|
278 | 277 | common = [nullrev] |
|
279 | 278 | |
|
280 | 279 | return ancestor.incrementalmissingancestors(self.parentrevs, common) |
|
281 | 280 | |
|
282 | 281 | def findmissing(self, common=None, heads=None): |
|
283 | 282 | """Return the ancestors of heads that are not ancestors of common. |
|
284 | 283 | |
|
285 | 284 | More specifically, return a list of nodes N such that every N |
|
286 | 285 | satisfies the following constraints: |
|
287 | 286 | |
|
288 | 287 | 1. N is an ancestor of some node in 'heads' |
|
289 | 288 | 2. N is not an ancestor of any node in 'common' |
|
290 | 289 | |
|
291 | 290 | The list is sorted by revision number, meaning it is |
|
292 | 291 | topologically sorted. |
|
293 | 292 | |
|
294 | 293 | 'heads' and 'common' are both lists of node IDs. If heads is |
|
295 | 294 | not supplied, uses all of the revlog's heads. If common is not |
|
296 | 295 | supplied, uses nullid.""" |
|
297 | 296 | if common is None: |
|
298 | common = [nullid] | |
|
297 | common = [sha1nodeconstants.nullid] | |
|
299 | 298 | if heads is None: |
|
300 | 299 | heads = self.heads() |
|
301 | 300 | |
|
302 | 301 | common = [self.rev(n) for n in common] |
|
303 | 302 | heads = [self.rev(n) for n in heads] |
|
304 | 303 | |
|
305 | 304 | inc = self.incrementalmissingrevs(common=common) |
|
306 | 305 | return [self.node(r) for r in inc.missingancestors(heads)] |
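
findmissing() above returns, in topological (ascending revision-number) order, every ancestor of heads that is not an ancestor of common. A small illustrative sketch of the same computation over a parentrevs() function (hypothetical standalone helper, not the extension's code):

    def findmissing_revs(parentrevs, common, heads):
        # collect everything reachable from common (the "known" set)
        known, stack = set(), list(common)
        while stack:
            r = stack.pop()
            if r >= 0 and r not in known:
                known.add(r)
                stack.extend(parentrevs(r))
        # walk back from heads, keeping revs outside the known set
        missing, stack = set(), list(heads)
        while stack:
            r = stack.pop()
            if r >= 0 and r not in known and r not in missing:
                missing.add(r)
                stack.extend(parentrevs(r))
        return sorted(missing)  # ascending revs are topologically sorted

For a linear history 0-1-2-3 (parentrevs(r) == [r - 1, -1]), common=[1] and heads=[3] yield [2, 3].
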
|
307 | 306 | |
|
308 | 307 | def children(self, node): |
|
309 | 308 | """find the children of a given node""" |
|
310 | 309 | c = [] |
|
311 | 310 | p = self.rev(node) |
|
312 | 311 | for r in self.revs(start=p + 1): |
|
313 | 312 | prevs = [pr for pr in self.parentrevs(r) if pr != nullrev] |
|
314 | 313 | if prevs: |
|
315 | 314 | for pr in prevs: |
|
316 | 315 | if pr == p: |
|
317 | 316 | c.append(self.node(r)) |
|
318 | 317 | elif p == nullrev: |
|
319 | 318 | c.append(self.node(r)) |
|
320 | 319 | return c |
|
321 | 320 | |
|
322 | 321 | def reachableroots(self, minroot, heads, roots, includepath=False): |
|
323 | 322 | return dagop._reachablerootspure( |
|
324 | 323 | self.parentrevs, minroot, roots, heads, includepath |
|
325 | 324 | ) |
|
326 | 325 | |
|
327 | 326 | # Cleanup opportunity: this is *identical* to the revlog.py version |
|
328 | 327 | def isancestor(self, a, b): |
|
329 | 328 | a, b = self.rev(a), self.rev(b) |
|
330 | 329 | return self.isancestorrev(a, b) |
|
331 | 330 | |
|
332 | 331 | # Cleanup opportunity: this is *identical* to the revlog.py version |
|
333 | 332 | def isancestorrev(self, a, b): |
|
334 | 333 | if a == nullrev: |
|
335 | 334 | return True |
|
336 | 335 | elif a == b: |
|
337 | 336 | return True |
|
338 | 337 | elif a > b: |
|
339 | 338 | return False |
|
340 | 339 | return bool(self.reachableroots(a, [b], [a], includepath=False)) |
|
341 | 340 | |
|
342 | 341 | def parentrevs(self, rev): |
|
343 | 342 | n = self.node(rev) |
|
344 | 343 | hn = gitutil.togitnode(n) |
|
345 | 344 | if hn != gitutil.nullgit: |
|
346 | 345 | c = self.gitrepo[hn] |
|
347 | 346 | else: |
|
348 | 347 | return nullrev, nullrev |
|
349 | 348 | p1 = p2 = nullrev |
|
350 | 349 | if c.parents: |
|
351 | 350 | p1 = self.rev(c.parents[0].id.raw) |
|
352 | 351 | if len(c.parents) > 2: |
|
353 | 352 | raise error.Abort(b'TODO octopus merge handling') |
|
354 | 353 | if len(c.parents) == 2: |
|
355 | 354 | p2 = self.rev(c.parents[1].id.raw) |
|
356 | 355 | return p1, p2 |
|
357 | 356 | |
|
358 | 357 | # Private method is used at least by the tags code. |
|
359 | 358 | _uncheckedparentrevs = parentrevs |
|
360 | 359 | |
|
361 | 360 | def commonancestorsheads(self, a, b): |
|
362 | 361 | # TODO the revlog version of this has a C path, so we probably 
|
363 | 362 | # need to optimize this... |
|
364 | 363 | a, b = self.rev(a), self.rev(b) |
|
365 | 364 | return [ |
|
366 | 365 | self.node(n) |
|
367 | 366 | for n in ancestor.commonancestorsheads(self.parentrevs, a, b) |
|
368 | 367 | ] |
|
369 | 368 | |
|
370 | 369 | def branchinfo(self, rev): |
|
371 | 370 | """Git doesn't do named branches, so just put everything on default.""" |
|
372 | 371 | return b'default', False |
|
373 | 372 | |
|
374 | 373 | def delayupdate(self, tr): |
|
375 | 374 | # TODO: I think we can elide this because we're just dropping |
|
376 | 375 | # an object in the git repo? |
|
377 | 376 | pass |
|
378 | 377 | |
|
379 | 378 | def add( |
|
380 | 379 | self, |
|
381 | 380 | manifest, |
|
382 | 381 | files, |
|
383 | 382 | desc, |
|
384 | 383 | transaction, |
|
385 | 384 | p1, |
|
386 | 385 | p2, |
|
387 | 386 | user, |
|
388 | 387 | date=None, |
|
389 | 388 | extra=None, |
|
390 | 389 | p1copies=None, |
|
391 | 390 | p2copies=None, |
|
392 | 391 | filesadded=None, |
|
393 | 392 | filesremoved=None, |
|
394 | 393 | ): |
|
395 | 394 | parents = [] |
|
396 | 395 | hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2) |
|
397 | if p1 != nullid: | |
|
396 | if p1 != sha1nodeconstants.nullid: | |
|
398 | 397 | parents.append(hp1) |
|
399 | if p2 and p2 != nullid: | |
|
398 | if p2 and p2 != sha1nodeconstants.nullid: | |
|
400 | 399 | parents.append(hp2) |
|
401 | 400 | assert date is not None |
|
402 | 401 | timestamp, tz = date |
|
403 | 402 | sig = pygit2.Signature( |
|
404 | 403 | encoding.unifromlocal(stringutil.person(user)), |
|
405 | 404 | encoding.unifromlocal(stringutil.email(user)), |
|
406 | 405 | int(timestamp), |
|
407 | 406 | -int(tz // 60), |
|
408 | 407 | ) |
|
409 | 408 | oid = self.gitrepo.create_commit( |
|
410 | 409 | None, sig, sig, desc, gitutil.togitnode(manifest), parents |
|
411 | 410 | ) |
|
412 | 411 | # Set up an internal reference to force the commit into the |
|
413 | 412 | # changelog. Hypothetically, we could even use this refs/hg/ |
|
414 | 413 | # namespace to allow for anonymous heads on git repos, which |
|
415 | 414 | # would be neat. |
|
416 | 415 | self.gitrepo.references.create( |
|
417 | 416 | 'refs/hg/internal/latest-commit', oid, force=True |
|
418 | 417 | ) |
|
419 | 418 | # Reindex now to pick up changes. We omit the progress |
|
420 | 419 | # and log callbacks because this will be very quick. |
|
421 | 420 | index._index_repo(self.gitrepo, self._db) |
|
422 | 421 | return oid.raw |
|
423 | 422 | |
|
424 | 423 | |
|
425 | 424 | class manifestlog(baselog): |
|
426 | 425 | nodeconstants = sha1nodeconstants |
|
427 | 426 | |
|
428 | 427 | def __getitem__(self, node): |
|
429 | 428 | return self.get(b'', node) |
|
430 | 429 | |
|
431 | 430 | def get(self, relpath, node): |
|
432 | if node == nullid: | |
|
431 | if node == sha1nodeconstants.nullid: | |
|
433 | 432 | # TODO: this should almost certainly be a memgittreemanifestctx |
|
434 | 433 | return manifest.memtreemanifestctx(self, relpath) |
|
435 | 434 | commit = self.gitrepo[gitutil.togitnode(node)] |
|
436 | 435 | t = commit.tree |
|
437 | 436 | if relpath: |
|
438 | 437 | parts = relpath.split(b'/') |
|
439 | 438 | for p in parts: |
|
440 | 439 | te = t[p] |
|
441 | 440 | t = self.gitrepo[te.id] |
|
442 | 441 | return gitmanifest.gittreemanifestctx(self.gitrepo, t) |
|
443 | 442 | |
|
444 | 443 | |
|
445 | 444 | @interfaceutil.implementer(repository.ifilestorage) |
|
446 | 445 | class filelog(baselog): |
|
447 | 446 | def __init__(self, gr, db, path): |
|
448 | 447 | super(filelog, self).__init__(gr, db) |
|
449 | 448 | assert isinstance(path, bytes) |
|
450 | 449 | self.path = path |
|
450 | self.nullid = sha1nodeconstants.nullid | |
|
451 | 451 | |
|
452 | 452 | def read(self, node): |
|
453 | if node == nullid: | |
|
453 | if node == sha1nodeconstants.nullid: | |
|
454 | 454 | return b'' |
|
455 | 455 | return self.gitrepo[gitutil.togitnode(node)].data |
|
456 | 456 | |
|
457 | 457 | def lookup(self, node): |
|
458 | 458 | if len(node) not in (20, 40): |
|
459 | 459 | node = int(node) |
|
460 | 460 | if isinstance(node, int): |
|
461 | 461 | assert False, b'todo revnums for nodes' |
|
462 | 462 | if len(node) == 40: |
|
463 | 463 | node = bin(node) |
|
464 | 464 | hnode = gitutil.togitnode(node) |
|
465 | 465 | if hnode in self.gitrepo: |
|
466 | 466 | return node |
|
467 | 467 | raise error.LookupError(self.path, node, _(b'no match found')) |
|
468 | 468 | |
|
469 | 469 | def cmp(self, node, text): |
|
470 | 470 | """Returns True if text is different than content at `node`.""" |
|
471 | 471 | return self.read(node) != text |
|
472 | 472 | |
|
473 | 473 | def add(self, text, meta, transaction, link, p1=None, p2=None): |
|
474 | 474 | assert not meta # Should we even try to handle this? |
|
475 | 475 | return self.gitrepo.create_blob(text).raw |
|
476 | 476 | |
|
477 | 477 | def __iter__(self): |
|
478 | 478 | for clrev in self._db.execute( |
|
479 | 479 | ''' |
|
480 | 480 | SELECT rev FROM changelog |
|
481 | 481 | INNER JOIN changedfiles ON changelog.node = changedfiles.node |
|
482 | 482 | WHERE changedfiles.filename = ? AND changedfiles.filenode != ? |
|
483 | 483 | ''', |
|
484 | 484 | (pycompat.fsdecode(self.path), gitutil.nullgit), |
|
485 | 485 | ): |
|
486 | 486 | yield clrev[0] |
|
487 | 487 | |
|
488 | 488 | def linkrev(self, fr): |
|
489 | 489 | return fr |
|
490 | 490 | |
|
491 | 491 | def rev(self, node): |
|
492 | 492 | row = self._db.execute( |
|
493 | 493 | ''' |
|
494 | 494 | SELECT rev FROM changelog |
|
495 | 495 | INNER JOIN changedfiles ON changelog.node = changedfiles.node |
|
496 | 496 | WHERE changedfiles.filename = ? AND changedfiles.filenode = ?''', |
|
497 | 497 | (pycompat.fsdecode(self.path), gitutil.togitnode(node)), |
|
498 | 498 | ).fetchone() |
|
499 | 499 | if row is None: |
|
500 | 500 | raise error.LookupError(self.path, node, _(b'no such node')) |
|
501 | 501 | return int(row[0]) |
|
502 | 502 | |
|
503 | 503 | def node(self, rev): |
|
504 | 504 | maybe = self._db.execute( |
|
505 | 505 | '''SELECT filenode FROM changedfiles |
|
506 | 506 | INNER JOIN changelog ON changelog.node = changedfiles.node |
|
507 | 507 | WHERE changelog.rev = ? AND filename = ? |
|
508 | 508 | ''', |
|
509 | 509 | (rev, pycompat.fsdecode(self.path)), |
|
510 | 510 | ).fetchone() |
|
511 | 511 | if maybe is None: |
|
512 | 512 | raise IndexError('gitlog %r out of range %d' % (self.path, rev)) |
|
513 | 513 | return bin(maybe[0]) |
|
514 | 514 | |
|
515 | 515 | def parents(self, node): |
|
516 | 516 | gn = gitutil.togitnode(node) |
|
517 | 517 | gp = pycompat.fsdecode(self.path) |
|
518 | 518 | ps = [] |
|
519 | 519 | for p in self._db.execute( |
|
520 | 520 | '''SELECT p1filenode, p2filenode FROM changedfiles |
|
521 | 521 | WHERE filenode = ? AND filename = ? |
|
522 | 522 | ''', |
|
523 | 523 | (gn, gp), |
|
524 | 524 | ).fetchone(): |
|
525 | 525 | if p is None: |
|
526 | 526 | commit = self._db.execute( |
|
527 | 527 | "SELECT node FROM changedfiles " |
|
528 | 528 | "WHERE filenode = ? AND filename = ?", |
|
529 | 529 | (gn, gp), |
|
530 | 530 | ).fetchone()[0] |
|
531 | 531 | # This filelog is missing some data. Build the |
|
532 | 532 | # filelog, then recurse (which will always find data). |
|
533 | 533 | if pycompat.ispy3: |
|
534 | 534 | commit = commit.decode('ascii') |
|
535 | 535 | index.fill_in_filelog(self.gitrepo, self._db, commit, gp, gn) |
|
536 | 536 | return self.parents(node) |
|
537 | 537 | else: |
|
538 | 538 | ps.append(bin(p)) |
|
539 | 539 | return ps |
|
540 | 540 | |
|
541 | 541 | def renamed(self, node): |
|
542 | 542 | # TODO: renames/copies |
|
543 | 543 | return False |
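
The bulk of the hunks above swap the module-level nullid/nullhex/wdirhex globals for attributes of sha1nodeconstants, which bundles the SHA-1 sentinel values in one place. A before/after sketch of the substitution (illustrative only):

    from mercurial.node import sha1nodeconstants

    def is_null(n):
        # previously: from mercurial.node import nullid; n == nullid
        return n == sha1nodeconstants.nullid

    def could_be_null(hexprefix):
        # previously: nullhex.startswith(hexprefix)
        return sha1nodeconstants.nullhex.startswith(hexprefix)
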
@@ -1,53 +1,53 b'' | |||
|
1 | 1 | """utilities to assist in working with pygit2""" |
|
2 | 2 | from __future__ import absolute_import |
|
3 | 3 | |
|
4 | from mercurial.node import bin, hex, nullid |
|
4 | from mercurial.node import bin, hex, sha1nodeconstants | |
|
5 | 5 | |
|
6 | 6 | from mercurial import pycompat |
|
7 | 7 | |
|
8 | 8 | pygit2_module = None |
|
9 | 9 | |
|
10 | 10 | |
|
11 | 11 | def get_pygit2(): |
|
12 | 12 | global pygit2_module |
|
13 | 13 | if pygit2_module is None: |
|
14 | 14 | try: |
|
15 | 15 | import pygit2 as pygit2_module |
|
16 | 16 | |
|
17 | 17 | pygit2_module.InvalidSpecError |
|
18 | 18 | except (ImportError, AttributeError): |
|
19 | 19 | pass |
|
20 | 20 | return pygit2_module |
|
21 | 21 | |
|
22 | 22 | |
|
23 | 23 | def pygit2_version(): |
|
24 | 24 | mod = get_pygit2() |
|
25 | 25 | v = "N/A" |
|
26 | 26 | |
|
27 | 27 | if mod: |
|
28 | 28 | try: |
|
29 | 29 | v = mod.__version__ |
|
30 | 30 | except AttributeError: |
|
31 | 31 | pass |
|
32 | 32 | |
|
33 | 33 | return b"(pygit2 %s)" % v.encode("utf-8") |
|
34 | 34 | |
|
35 | 35 | |
|
36 | 36 | def togitnode(n): |
|
37 | 37 | """Wrapper to convert a Mercurial binary node to a unicode hexlified node. |
|
38 | 38 | |
|
39 | 39 | pygit2 and sqlite both need nodes as strings, not bytes. |
|
40 | 40 | """ |
|
41 | 41 | assert len(n) == 20 |
|
42 | 42 | return pycompat.sysstr(hex(n)) |
|
43 | 43 | |
|
44 | 44 | |
|
45 | 45 | def fromgitnode(n): |
|
46 | 46 | """Opposite of togitnode.""" |
|
47 | 47 | assert len(n) == 40 |
|
48 | 48 | if pycompat.ispy3: |
|
49 | 49 | return bin(n.encode('ascii')) |
|
50 | 50 | return bin(n) |
|
51 | 51 | |
|
52 | 52 | |
|
53 | nullgit = togitnode(nullid) | |
|
53 | nullgit = togitnode(sha1nodeconstants.nullid) |
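
togitnode() and fromgitnode() above bridge Mercurial's 20-byte binary nodes and the 40-character native-str hex form that pygit2 and sqlite expect. A quick usage sketch against the helpers above:

    from mercurial.node import sha1nodeconstants

    binnode = sha1nodeconstants.nullid      # 20 raw bytes
    strnode = togitnode(binnode)            # 40-char str: '000...0'
    assert len(strnode) == 40
    assert fromgitnode(strnode) == binnode  # round-trips exactly
    assert strnode == nullgit               # the module-level constant
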
@@ -1,362 +1,361 b'' | |||
|
1 | 1 | from __future__ import absolute_import |
|
2 | 2 | |
|
3 | 3 | import collections |
|
4 | 4 | import os |
|
5 | 5 | import sqlite3 |
|
6 | 6 | |
|
7 | 7 | from mercurial.i18n import _ |
|
8 | from mercurial.node import ( |
|
9 | nullhex, | |
|
10 | nullid, | |
|
11 | ) | |
|
8 | from mercurial.node import sha1nodeconstants | |
|
12 | 9 | |
|
13 | 10 | from mercurial import ( |
|
14 | 11 | encoding, |
|
15 | 12 | error, |
|
16 | 13 | pycompat, |
|
17 | 14 | ) |
|
18 | 15 | |
|
19 | 16 | from . import gitutil |
|
20 | 17 | |
|
21 | 18 | |
|
22 | 19 | pygit2 = gitutil.get_pygit2() |
|
23 | 20 | |
|
24 | 21 | _CURRENT_SCHEMA_VERSION = 1 |
|
25 | 22 | _SCHEMA = ( |
|
26 | 23 | """ |
|
27 | 24 | CREATE TABLE refs ( |
|
28 | 25 | -- node and name are unique together. There may be more than one name for |
|
29 | 26 | -- a given node, and there may be no name at all for a given node (in the |
|
30 | 27 | -- case of an anonymous hg head). |
|
31 | 28 | node TEXT NOT NULL, |
|
32 | 29 | name TEXT |
|
33 | 30 | ); |
|
34 | 31 | |
|
35 | 32 | -- The "possible heads" of the repository, which we use to figure out |
|
36 | 33 | -- if we need to re-walk the changelog. |
|
37 | 34 | CREATE TABLE possible_heads ( |
|
38 | 35 | node TEXT NOT NULL |
|
39 | 36 | ); |
|
40 | 37 | |
|
41 | 38 | -- The topological heads of the changelog, which hg depends on. |
|
42 | 39 | CREATE TABLE heads ( |
|
43 | 40 | node TEXT NOT NULL |
|
44 | 41 | ); |
|
45 | 42 | |
|
46 | 43 | -- A total ordering of the changelog |
|
47 | 44 | CREATE TABLE changelog ( |
|
48 | 45 | rev INTEGER NOT NULL PRIMARY KEY, |
|
49 | 46 | node TEXT NOT NULL, |
|
50 | 47 | p1 TEXT, |
|
51 | 48 | p2 TEXT |
|
52 | 49 | ); |
|
53 | 50 | |
|
54 | 51 | CREATE UNIQUE INDEX changelog_node_idx ON changelog(node); |
|
55 | 52 | CREATE UNIQUE INDEX changelog_node_rev_idx ON changelog(rev, node); |
|
56 | 53 | |
|
57 | 54 | -- Changed files for each commit, which lets us dynamically build |
|
58 | 55 | -- filelogs. |
|
59 | 56 | CREATE TABLE changedfiles ( |
|
60 | 57 | node TEXT NOT NULL, |
|
61 | 58 | filename TEXT NOT NULL, |
|
62 | 59 | -- 40 zeroes for deletions |
|
63 | 60 | filenode TEXT NOT NULL, |
|
64 | 61 | -- to handle filelog parentage: |
|
65 | 62 | p1node TEXT, |
|
66 | 63 | p1filenode TEXT, |
|
67 | 64 | p2node TEXT, |
|
68 | 65 | p2filenode TEXT |
|
69 | 66 | ); |
|
70 | 67 | |
|
71 | 68 | CREATE INDEX changedfiles_nodes_idx |
|
72 | 69 | ON changedfiles(node); |
|
73 | 70 | |
|
74 | 71 | PRAGMA user_version=%d |
|
75 | 72 | """ |
|
76 | 73 | % _CURRENT_SCHEMA_VERSION |
|
77 | 74 | ) |
|
78 | 75 | |
|
79 | 76 | |
|
80 | 77 | def _createdb(path): |
|
81 | 78 | # print('open db', path) |
|
82 | 79 | # import traceback |
|
83 | 80 | # traceback.print_stack() |
|
84 | 81 | db = sqlite3.connect(encoding.strfromlocal(path)) |
|
85 | 82 | db.text_factory = bytes |
|
86 | 83 | |
|
87 | 84 | res = db.execute('PRAGMA user_version').fetchone()[0] |
|
88 | 85 | |
|
89 | 86 | # New database. |
|
90 | 87 | if res == 0: |
|
91 | 88 | for statement in _SCHEMA.split(';'): |
|
92 | 89 | db.execute(statement.strip()) |
|
93 | 90 | |
|
94 | 91 | db.commit() |
|
95 | 92 | |
|
96 | 93 | elif res == _CURRENT_SCHEMA_VERSION: |
|
97 | 94 | pass |
|
98 | 95 | |
|
99 | 96 | else: |
|
100 | 97 | raise error.Abort(_(b'sqlite database has unrecognized version')) |
|
101 | 98 | |
|
102 | 99 | db.execute('PRAGMA journal_mode=WAL') |
|
103 | 100 | |
|
104 | 101 | return db |
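
_createdb() above keys its setup off sqlite's user_version pragma: 0 means a fresh file that needs the schema, the current constant means nothing to do, and anything else aborts. A standalone sketch of that versioning idiom (illustrative, not the extension's API):

    import sqlite3

    SCHEMA_VERSION = 1

    def open_db(path):
        db = sqlite3.connect(path)
        version = db.execute('PRAGMA user_version').fetchone()[0]
        if version == 0:
            # fresh database: create tables, then stamp the version
            db.execute('CREATE TABLE kv (k TEXT PRIMARY KEY, v TEXT)')
            db.execute('PRAGMA user_version=%d' % SCHEMA_VERSION)
            db.commit()
        elif version != SCHEMA_VERSION:
            raise RuntimeError('unrecognized schema version %d' % version)
        return db
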
|
105 | 102 | |
|
106 | 103 | |
|
107 | 104 | _OUR_ORDER = () |
|
108 | 105 | if pygit2: |
|
109 | 106 | _OUR_ORDER = ( |
|
110 | 107 | pygit2.GIT_SORT_TOPOLOGICAL |
|
111 | 108 | | pygit2.GIT_SORT_TIME |
|
112 | 109 | | pygit2.GIT_SORT_REVERSE |
|
113 | 110 | ) |
|
114 | 111 | |
|
115 | 112 | _DIFF_FLAGS = 1 << 21 # GIT_DIFF_FORCE_BINARY, which isn't exposed by pygit2 |
|
116 | 113 | |
|
117 | 114 | |
|
118 | 115 | def _find_nearest_ancestor_introducing_node( |
|
119 | 116 | db, gitrepo, file_path, walk_start, filenode |
|
120 | 117 | ): |
|
121 | 118 | """Find the nearest ancestor that introduces a file node. |
|
122 | 119 | |
|
123 | 120 | Args: |
|
124 | 121 | db: a handle to our sqlite database. |
|
125 | 122 | gitrepo: A pygit2.Repository instance. |
|
126 | 123 | file_path: the path of a file in the repo |
|
127 | 124 | walk_start: a pygit2.Oid that is a commit where we should start walking |
|
128 | 125 | for our nearest ancestor. |
|
129 | 126 | |
|
130 | 127 | Returns: |
|
131 | 128 | A hexlified SHA that is the commit ID of the next-nearest parent. |
|
132 | 129 | """ |
|
133 | 130 | assert isinstance(file_path, str), 'file_path must be str, got %r' % type( |
|
134 | 131 | file_path |
|
135 | 132 | ) |
|
136 | 133 | assert isinstance(filenode, str), 'filenode must be str, got %r' % type( |
|
137 | 134 | filenode |
|
138 | 135 | ) |
|
139 | 136 | parent_options = { |
|
140 | 137 | row[0].decode('ascii') |
|
141 | 138 | for row in db.execute( |
|
142 | 139 | 'SELECT node FROM changedfiles ' |
|
143 | 140 | 'WHERE filename = ? AND filenode = ?', |
|
144 | 141 | (file_path, filenode), |
|
145 | 142 | ) |
|
146 | 143 | } |
|
147 | 144 | inner_walker = gitrepo.walk(walk_start, _OUR_ORDER) |
|
148 | 145 | for w in inner_walker: |
|
149 | 146 | if w.id.hex in parent_options: |
|
150 | 147 | return w.id.hex |
|
151 | 148 | raise error.ProgrammingError( |
|
152 | 149 | 'Unable to find introducing commit for %s node %s from %s', |
|
153 | 150 | (file_path, filenode, walk_start), |
|
154 | 151 | ) |
|
155 | 152 | |
|
156 | 153 | |
|
157 | 154 | def fill_in_filelog(gitrepo, db, startcommit, path, startfilenode): |
|
158 | 155 | """Given a starting commit and path, fill in a filelog's parent pointers. |
|
159 | 156 | |
|
160 | 157 | Args: |
|
161 | 158 | gitrepo: a pygit2.Repository |
|
162 | 159 | db: a handle to our sqlite database |
|
163 | 160 | startcommit: a hexlified node id for the commit to start at |
|
164 | 161 | path: the path of the file whose parent pointers we should fill in. |
|
165 | 162 | filenode: the hexlified node id of the file at startcommit |
|
166 | 163 | |
|
167 | 164 | TODO: make filenode optional |
|
168 | 165 | """ |
|
169 | 166 | assert isinstance( |
|
170 | 167 | startcommit, str |
|
171 | 168 | ), 'startcommit must be str, got %r' % type(startcommit) |
|
172 | 169 | assert isinstance( |
|
173 | 170 | startfilenode, str |
|
174 | 171 | ), 'startfilenode must be str, got %r' % type(startfilenode) |
|
175 | 172 | visit = collections.deque([(startcommit, startfilenode)]) |
|
176 | 173 | while visit: |
|
177 | 174 | cnode, filenode = visit.popleft() |
|
178 | 175 | commit = gitrepo[cnode] |
|
179 | 176 | parents = [] |
|
180 | 177 | for parent in commit.parents: |
|
181 | 178 | t = parent.tree |
|
182 | 179 | for comp in path.split('/'): |
|
183 | 180 | try: |
|
184 | 181 | t = gitrepo[t[comp].id] |
|
185 | 182 | except KeyError: |
|
186 | 183 | break |
|
187 | 184 | else: |
|
188 | 185 | introducer = _find_nearest_ancestor_introducing_node( |
|
189 | 186 | db, gitrepo, path, parent.id, t.id.hex |
|
190 | 187 | ) |
|
191 | 188 | parents.append((introducer, t.id.hex)) |
|
192 | 189 | p1node = p1fnode = p2node = p2fnode = gitutil.nullgit |
|
193 | 190 | for par, parfnode in parents: |
|
194 | 191 | found = int( |
|
195 | 192 | db.execute( |
|
196 | 193 | 'SELECT COUNT(*) FROM changedfiles WHERE ' |
|
197 | 194 | 'node = ? AND filename = ? AND filenode = ? AND ' |
|
198 | 195 | 'p1node NOT NULL', |
|
199 | 196 | (par, path, parfnode), |
|
200 | 197 | ).fetchone()[0] |
|
201 | 198 | ) |
|
202 | 199 | if found == 0: |
|
203 | 200 | assert par is not None |
|
204 | 201 | visit.append((par, parfnode)) |
|
205 | 202 | if parents: |
|
206 | 203 | p1node, p1fnode = parents[0] |
|
207 | 204 | if len(parents) == 2: |
|
208 | 205 | p2node, p2fnode = parents[1] |
|
209 | 206 | if len(parents) > 2: |
|
210 | 207 | raise error.ProgrammingError( |
|
211 | 208 | b"git support can't handle octopus merges" |
|
212 | 209 | ) |
|
213 | 210 | db.execute( |
|
214 | 211 | 'UPDATE changedfiles SET ' |
|
215 | 212 | 'p1node = ?, p1filenode = ?, p2node = ?, p2filenode = ? ' |
|
216 | 213 | 'WHERE node = ? AND filename = ? AND filenode = ?', |
|
217 | 214 | (p1node, p1fnode, p2node, p2fnode, commit.id.hex, path, filenode), |
|
218 | 215 | ) |
|
219 | 216 | db.commit() |
|
220 | 217 | |
|
221 | 218 | |
|
222 | 219 | def _index_repo( |
|
223 | 220 | gitrepo, |
|
224 | 221 | db, |
|
225 | 222 | logfn=lambda x: None, |
|
226 | 223 | progress_factory=lambda *args, **kwargs: None, |
|
227 | 224 | ): |
|
228 | 225 | # Identify all references so we can tell the walker to visit all of them. |
|
229 | 226 | all_refs = gitrepo.listall_references() |
|
230 | 227 | possible_heads = set() |
|
231 | 228 | prog = progress_factory(b'refs') |
|
232 | 229 | for pos, ref in enumerate(all_refs): |
|
233 | 230 | if prog is not None: |
|
234 | 231 | prog.update(pos) |
|
235 | 232 | if not ( |
|
236 | 233 | ref.startswith('refs/heads/') # local branch |
|
237 | 234 | or ref.startswith('refs/tags/') # tag |
|
238 | 235 | or ref.startswith('refs/remotes/') # remote branch |
|
239 | 236 | or ref.startswith('refs/hg/') # from this extension |
|
240 | 237 | ): |
|
241 | 238 | continue |
|
242 | 239 | try: |
|
243 | 240 | start = gitrepo.lookup_reference(ref).peel(pygit2.GIT_OBJ_COMMIT) |
|
244 | 241 | except ValueError: |
|
245 | 242 | # No commit to be found, so we don't care for hg's purposes. |
|
246 | 243 | continue |
|
247 | 244 | possible_heads.add(start.id) |
|
248 | 245 | # Optimization: if the list of heads hasn't changed, don't |
|
249 | 246 | # reindex the changelog. This doesn't matter on small 
|
250 | 247 | # repositories, but on even moderately deep histories (eg cpython) |
|
251 | 248 | # this is a very important performance win. |
|
252 | 249 | # |
|
253 | 250 | # TODO: we should figure out how to incrementally index history |
|
254 | 251 | # (preferably by detecting rewinds!) so that we don't have to do a |
|
255 | 252 | # full changelog walk every time a new commit is created. |
|
256 | 253 | cache_heads = { |
|
257 | 254 | pycompat.sysstr(x[0]) |
|
258 | 255 | for x in db.execute('SELECT node FROM possible_heads') |
|
259 | 256 | } |
|
260 | 257 | walker = None |
|
261 | 258 | cur_cache_heads = {h.hex for h in possible_heads} |
|
262 | 259 | if cur_cache_heads == cache_heads: |
|
263 | 260 | return |
|
264 | 261 | logfn(b'heads mismatch, rebuilding dagcache\n') |
|
265 | 262 | for start in possible_heads: |
|
266 | 263 | if walker is None: |
|
267 | 264 | walker = gitrepo.walk(start, _OUR_ORDER) |
|
268 | 265 | else: |
|
269 | 266 | walker.push(start) |
|
270 | 267 | |
|
271 | 268 | # Empty out the existing changelog. Even for large-ish histories |
|
272 | 269 | # we can do the top-level "walk all the commits" dance very |
|
273 | 270 | # quickly as long as we don't need to figure out the changed files |
|
274 | 271 | # list. |
|
275 | 272 | db.execute('DELETE FROM changelog') |
|
276 | 273 | if prog is not None: |
|
277 | 274 | prog.complete() |
|
278 | 275 | prog = progress_factory(b'commits') |
|
279 | 276 | # This walker is sure to visit all the revisions in history, but |
|
280 | 277 | # only once. |
|
281 | 278 | for pos, commit in enumerate(walker): |
|
282 | 279 | if prog is not None: |
|
283 | 280 | prog.update(pos) |
|
284 | p1 = p2 = nullhex | |
|
281 | p1 = p2 = sha1nodeconstants.nullhex | |
|
285 | 282 | if len(commit.parents) > 2: |
|
286 | 283 | raise error.ProgrammingError( |
|
287 | 284 | ( |
|
288 | 285 | b"git support can't handle octopus merges, " |
|
289 | 286 | b"found a commit with %d parents :(" |
|
290 | 287 | ) |
|
291 | 288 | % len(commit.parents) |
|
292 | 289 | ) |
|
293 | 290 | if commit.parents: |
|
294 | 291 | p1 = commit.parents[0].id.hex |
|
295 | 292 | if len(commit.parents) == 2: |
|
296 | 293 | p2 = commit.parents[1].id.hex |
|
297 | 294 | db.execute( |
|
298 | 295 | 'INSERT INTO changelog (rev, node, p1, p2) VALUES(?, ?, ?, ?)', |
|
299 | 296 | (pos, commit.id.hex, p1, p2), |
|
300 | 297 | ) |
|
301 | 298 | |
|
302 | 299 | num_changedfiles = db.execute( |
|
303 | 300 | "SELECT COUNT(*) from changedfiles WHERE node = ?", |
|
304 | 301 | (commit.id.hex,), |
|
305 | 302 | ).fetchone()[0] |
|
306 | 303 | if not num_changedfiles: |
|
307 | 304 | files = {} |
|
308 | 305 | # I *think* we only need to check p1 for changed files |
|
309 | 306 | # (and therefore linkrevs), because any node that would |
|
310 | 307 | # actually have this commit as a linkrev would be |
|
311 | 308 | # completely new in this rev. |
|
312 | 309 | p1 = commit.parents[0].id.hex if commit.parents else None |
|
313 | 310 | if p1 is not None: |
|
314 | 311 | patchgen = gitrepo.diff(p1, commit.id.hex, flags=_DIFF_FLAGS) |
|
315 | 312 | else: |
|
316 | 313 | patchgen = commit.tree.diff_to_tree( |
|
317 | 314 | swap=True, flags=_DIFF_FLAGS |
|
318 | 315 | ) |
|
319 | 316 | new_files = (p.delta.new_file for p in patchgen) |
|
320 | 317 | files = { |
|
321 | nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid |
|
318 | nf.path: nf.id.hex | |
|
319 | for nf in new_files | |
|
320 | if nf.id.raw != sha1nodeconstants.nullid | |
|
322 | 321 | } |
|
323 | 322 | for p, n in files.items(): |
|
324 | 323 | # We intentionally set NULLs for any file parentage |
|
325 | 324 | # information so it'll get demand-computed later. We |
|
326 | 325 | # used to do it right here, and it was _very_ slow. |
|
327 | 326 | db.execute( |
|
328 | 327 | 'INSERT INTO changedfiles (' |
|
329 | 328 | 'node, filename, filenode, p1node, p1filenode, p2node, ' |
|
330 | 329 | 'p2filenode) VALUES(?, ?, ?, ?, ?, ?, ?)', |
|
331 | 330 | (commit.id.hex, p, n, None, None, None, None), |
|
332 | 331 | ) |
|
333 | 332 | db.execute('DELETE FROM heads') |
|
334 | 333 | db.execute('DELETE FROM possible_heads') |
|
335 | 334 | for hid in possible_heads: |
|
336 | 335 | h = hid.hex |
|
337 | 336 | db.execute('INSERT INTO possible_heads (node) VALUES(?)', (h,)) |
|
338 | 337 | haschild = db.execute( |
|
339 | 338 | 'SELECT COUNT(*) FROM changelog WHERE p1 = ? OR p2 = ?', (h, h) |
|
340 | 339 | ).fetchone()[0] |
|
341 | 340 | if not haschild: |
|
342 | 341 | db.execute('INSERT INTO heads (node) VALUES(?)', (h,)) |
|
343 | 342 | |
|
344 | 343 | db.commit() |
|
345 | 344 | if prog is not None: |
|
346 | 345 | prog.complete() |
|
347 | 346 | |
|
348 | 347 | |
|
349 | 348 | def get_index( |
|
350 | 349 | gitrepo, logfn=lambda x: None, progress_factory=lambda *args, **kwargs: None |
|
351 | 350 | ): |
|
352 | 351 | cachepath = os.path.join( |
|
353 | 352 | pycompat.fsencode(gitrepo.path), b'..', b'.hg', b'cache' |
|
354 | 353 | ) |
|
355 | 354 | if not os.path.exists(cachepath): |
|
356 | 355 | os.makedirs(cachepath) |
|
357 | 356 | dbpath = os.path.join(cachepath, b'git-commits.sqlite') |
|
358 | 357 | db = _createdb(dbpath) |
|
359 | 358 | # TODO check against gitrepo heads before doing a full index |
|
360 | 359 | # TODO thread a ui.progress call into this layer |
|
361 | 360 | _index_repo(gitrepo, db, logfn, progress_factory) |
|
362 | 361 | return db |
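
Because the changelog table above stores a dense rev ordering with unique node and (rev, node) indexes, the rev-to-node and node-to-rev lookups that gitlog performs reduce to single indexed queries. A sketch of both against this schema (hypothetical standalone code, not the extension's API):

    import sqlite3

    db = sqlite3.connect('git-commits.sqlite')

    def rev_for_node(hexnode):
        # changelog_node_idx makes this a unique-index probe
        row = db.execute(
            'SELECT rev FROM changelog WHERE node = ?', (hexnode,)
        ).fetchone()
        return None if row is None else int(row[0])

    def node_for_rev(rev):
        row = db.execute(
            'SELECT node FROM changelog WHERE rev = ?', (rev,)
        ).fetchone()
        return None if row is None else row[0]
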
@@ -1,390 +1,391 b'' | |||
|
1 | 1 | # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org> |
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | '''commands to sign and verify changesets''' |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import binascii |
|
11 | 11 | import os |
|
12 | 12 | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | 14 | from mercurial.node import ( |
|
15 | 15 | bin, |
|
16 | 16 | hex, |
|
17 | nullid, | |
|
18 | 17 | short, |
|
19 | 18 | ) |
|
20 | 19 | from mercurial import ( |
|
21 | 20 | cmdutil, |
|
22 | 21 | error, |
|
23 | 22 | help, |
|
24 | 23 | match, |
|
25 | 24 | pycompat, |
|
26 | 25 | registrar, |
|
27 | 26 | ) |
|
28 | 27 | from mercurial.utils import ( |
|
29 | 28 | dateutil, |
|
30 | 29 | procutil, |
|
31 | 30 | ) |
|
32 | 31 | |
|
33 | 32 | cmdtable = {} |
|
34 | 33 | command = registrar.command(cmdtable) |
|
35 | 34 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
36 | 35 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
37 | 36 | # be specifying the version(s) of Mercurial they are tested with, or |
|
38 | 37 | # leave the attribute unspecified. |
|
39 | 38 | testedwith = b'ships-with-hg-core' |
|
40 | 39 | |
|
41 | 40 | configtable = {} |
|
42 | 41 | configitem = registrar.configitem(configtable) |
|
43 | 42 | |
|
44 | 43 | configitem( |
|
45 | 44 | b'gpg', |
|
46 | 45 | b'cmd', |
|
47 | 46 | default=b'gpg', |
|
48 | 47 | ) |
|
49 | 48 | configitem( |
|
50 | 49 | b'gpg', |
|
51 | 50 | b'key', |
|
52 | 51 | default=None, |
|
53 | 52 | ) |
|
54 | 53 | configitem( |
|
55 | 54 | b'gpg', |
|
56 | 55 | b'.*', |
|
57 | 56 | default=None, |
|
58 | 57 | generic=True, |
|
59 | 58 | ) |
|
60 | 59 | |
|
61 | 60 | # Custom help category |
|
62 | 61 | _HELP_CATEGORY = b'gpg' |
|
63 | 62 | help.CATEGORY_ORDER.insert( |
|
64 | 63 | help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP), _HELP_CATEGORY |
|
65 | 64 | ) |
|
66 | 65 | help.CATEGORY_NAMES[_HELP_CATEGORY] = b'Signing changes (GPG)' |
|
67 | 66 | |
|
68 | 67 | |
|
69 | 68 | class gpg(object): |
|
70 | 69 | def __init__(self, path, key=None): |
|
71 | 70 | self.path = path |
|
72 | 71 | self.key = (key and b" --local-user \"%s\"" % key) or b"" |
|
73 | 72 | |
|
74 | 73 | def sign(self, data): |
|
75 | 74 | gpgcmd = b"%s --sign --detach-sign%s" % (self.path, self.key) |
|
76 | 75 | return procutil.filter(data, gpgcmd) |
|
77 | 76 | |
|
78 | 77 | def verify(self, data, sig): |
|
79 | 78 | """ returns of the good and bad signatures""" |
|
80 | 79 | sigfile = datafile = None |
|
81 | 80 | try: |
|
82 | 81 | # create temporary files |
|
83 | 82 | fd, sigfile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".sig") |
|
84 | 83 | fp = os.fdopen(fd, 'wb') |
|
85 | 84 | fp.write(sig) |
|
86 | 85 | fp.close() |
|
87 | 86 | fd, datafile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".txt") |
|
88 | 87 | fp = os.fdopen(fd, 'wb') |
|
89 | 88 | fp.write(data) |
|
90 | 89 | fp.close() |
|
91 | 90 | gpgcmd = ( |
|
92 | 91 | b"%s --logger-fd 1 --status-fd 1 --verify \"%s\" \"%s\"" |
|
93 | 92 | % ( |
|
94 | 93 | self.path, |
|
95 | 94 | sigfile, |
|
96 | 95 | datafile, |
|
97 | 96 | ) |
|
98 | 97 | ) |
|
99 | 98 | ret = procutil.filter(b"", gpgcmd) |
|
100 | 99 | finally: |
|
101 | 100 | for f in (sigfile, datafile): |
|
102 | 101 | try: |
|
103 | 102 | if f: |
|
104 | 103 | os.unlink(f) |
|
105 | 104 | except OSError: |
|
106 | 105 | pass |
|
107 | 106 | keys = [] |
|
108 | 107 | key, fingerprint = None, None |
|
109 | 108 | for l in ret.splitlines(): |
|
110 | 109 | # see DETAILS in the gnupg documentation |
|
111 | 110 | # filter the logger output |
|
112 | 111 | if not l.startswith(b"[GNUPG:]"): |
|
113 | 112 | continue |
|
114 | 113 | l = l[9:] |
|
115 | 114 | if l.startswith(b"VALIDSIG"): |
|
116 | 115 | # fingerprint of the primary key |
|
117 | 116 | fingerprint = l.split()[10] |
|
118 | 117 | elif l.startswith(b"ERRSIG"): |
|
119 | 118 | key = l.split(b" ", 3)[:2] |
|
120 | 119 | key.append(b"") |
|
121 | 120 | fingerprint = None |
|
122 | 121 | elif ( |
|
123 | 122 | l.startswith(b"GOODSIG") |
|
124 | 123 | or l.startswith(b"EXPSIG") |
|
125 | 124 | or l.startswith(b"EXPKEYSIG") |
|
126 | 125 | or l.startswith(b"BADSIG") |
|
127 | 126 | ): |
|
128 | 127 | if key is not None: |
|
129 | 128 | keys.append(key + [fingerprint]) |
|
130 | 129 | key = l.split(b" ", 2) |
|
131 | 130 | fingerprint = None |
|
132 | 131 | if key is not None: |
|
133 | 132 | keys.append(key + [fingerprint]) |
|
134 | 133 | return keys |
|
135 | 134 | |
|
136 | 135 | |
|
137 | 136 | def newgpg(ui, **opts): |
|
138 | 137 | """create a new gpg instance""" |
|
139 | 138 | gpgpath = ui.config(b"gpg", b"cmd") |
|
140 | 139 | gpgkey = opts.get('key') |
|
141 | 140 | if not gpgkey: |
|
142 | 141 | gpgkey = ui.config(b"gpg", b"key") |
|
143 | 142 | return gpg(gpgpath, gpgkey) |
|
144 | 143 | |
|
145 | 144 | |
|
146 | 145 | def sigwalk(repo): |
|
147 | 146 | """ |
|
148 | 147 | walk over every sig, yielding a pair 
|
149 | 148 | ((node, version, sig), (filename, linenumber)) |
|
150 | 149 | """ |
|
151 | 150 | |
|
152 | 151 | def parsefile(fileiter, context): |
|
153 | 152 | ln = 1 |
|
154 | 153 | for l in fileiter: |
|
155 | 154 | if not l: |
|
156 | 155 | continue |
|
157 | 156 | yield (l.split(b" ", 2), (context, ln)) |
|
158 | 157 | ln += 1 |
|
159 | 158 | |
|
160 | 159 | # read the heads |
|
161 | 160 | fl = repo.file(b".hgsigs") |
|
162 | 161 | for r in reversed(fl.heads()): |
|
163 | 162 | fn = b".hgsigs|%s" % short(r) |
|
164 | 163 | for item in parsefile(fl.read(r).splitlines(), fn): |
|
165 | 164 | yield item |
|
166 | 165 | try: |
|
167 | 166 | # read local signatures |
|
168 | 167 | fn = b"localsigs" |
|
169 | 168 | for item in parsefile(repo.vfs(fn), fn): |
|
170 | 169 | yield item |
|
171 | 170 | except IOError: |
|
172 | 171 | pass |
|
173 | 172 | |
|
174 | 173 | |
|
175 | 174 | def getkeys(ui, repo, mygpg, sigdata, context): |
|
176 | 175 | """get the keys who signed a data""" |
|
177 | 176 | fn, ln = context |
|
178 | 177 | node, version, sig = sigdata |
|
179 | 178 | prefix = b"%s:%d" % (fn, ln) |
|
180 | 179 | node = bin(node) |
|
181 | 180 | |
|
182 | 181 | data = node2txt(repo, node, version) |
|
183 | 182 | sig = binascii.a2b_base64(sig) |
|
184 | 183 | keys = mygpg.verify(data, sig) |
|
185 | 184 | |
|
186 | 185 | validkeys = [] |
|
187 | 186 | # warn for expired key and/or sigs |
|
188 | 187 | for key in keys: |
|
189 | 188 | if key[0] == b"ERRSIG": |
|
190 | 189 | ui.write(_(b"%s Unknown key ID \"%s\"\n") % (prefix, key[1])) |
|
191 | 190 | continue |
|
192 | 191 | if key[0] == b"BADSIG": |
|
193 | 192 | ui.write(_(b"%s Bad signature from \"%s\"\n") % (prefix, key[2])) |
|
194 | 193 | continue |
|
195 | 194 | if key[0] == b"EXPSIG": |
|
196 | 195 | ui.write( |
|
197 | 196 | _(b"%s Note: Signature has expired (signed by: \"%s\")\n") |
|
198 | 197 | % (prefix, key[2]) |
|
199 | 198 | ) |
|
200 | 199 | elif key[0] == b"EXPKEYSIG": |
|
201 | 200 | ui.write( |
|
202 | 201 | _(b"%s Note: This key has expired (signed by: \"%s\")\n") |
|
203 | 202 | % (prefix, key[2]) |
|
204 | 203 | ) |
|
205 | 204 | validkeys.append((key[1], key[2], key[3])) |
|
206 | 205 | return validkeys |
|
207 | 206 | |
|
208 | 207 | |
|
209 | 208 | @command(b"sigs", [], _(b'hg sigs'), helpcategory=_HELP_CATEGORY) |
|
210 | 209 | def sigs(ui, repo): |
|
211 | 210 | """list signed changesets""" |
|
212 | 211 | mygpg = newgpg(ui) |
|
213 | 212 | revs = {} |
|
214 | 213 | |
|
215 | 214 | for data, context in sigwalk(repo): |
|
216 | 215 | node, version, sig = data |
|
217 | 216 | fn, ln = context |
|
218 | 217 | try: |
|
219 | 218 | n = repo.lookup(node) |
|
220 | 219 | except KeyError: |
|
221 | 220 | ui.warn(_(b"%s:%d node does not exist\n") % (fn, ln)) |
|
222 | 221 | continue |
|
223 | 222 | r = repo.changelog.rev(n) |
|
224 | 223 | keys = getkeys(ui, repo, mygpg, data, context) |
|
225 | 224 | if not keys: |
|
226 | 225 | continue |
|
227 | 226 | revs.setdefault(r, []) |
|
228 | 227 | revs[r].extend(keys) |
|
229 | 228 | for rev in sorted(revs, reverse=True): |
|
230 | 229 | for k in revs[rev]: |
|
231 | 230 | r = b"%5d:%s" % (rev, hex(repo.changelog.node(rev))) |
|
232 | 231 | ui.write(b"%-30s %s\n" % (keystr(ui, k), r)) |
|
233 | 232 | |
|
234 | 233 | |
|
235 | 234 | @command(b"sigcheck", [], _(b'hg sigcheck REV'), helpcategory=_HELP_CATEGORY) |
|
236 | 235 | def sigcheck(ui, repo, rev): |
|
237 | 236 | """verify all the signatures there may be for a particular revision""" |
|
238 | 237 | mygpg = newgpg(ui) |
|
239 | 238 | rev = repo.lookup(rev) |
|
240 | 239 | hexrev = hex(rev) |
|
241 | 240 | keys = [] |
|
242 | 241 | |
|
243 | 242 | for data, context in sigwalk(repo): |
|
244 | 243 | node, version, sig = data |
|
245 | 244 | if node == hexrev: |
|
246 | 245 | k = getkeys(ui, repo, mygpg, data, context) |
|
247 | 246 | if k: |
|
248 | 247 | keys.extend(k) |
|
249 | 248 | |
|
250 | 249 | if not keys: |
|
251 | 250 | ui.write(_(b"no valid signature for %s\n") % short(rev)) |
|
252 | 251 | return |
|
253 | 252 | |
|
254 | 253 | # print summary |
|
255 | 254 | ui.write(_(b"%s is signed by:\n") % short(rev)) |
|
256 | 255 | for key in keys: |
|
257 | 256 | ui.write(b" %s\n" % keystr(ui, key)) |
|
258 | 257 | |
|
259 | 258 | |
|
260 | 259 | def keystr(ui, key): |
|
261 | 260 | """associate a string to a key (username, comment)""" |
|
262 | 261 | keyid, user, fingerprint = key |
|
263 | 262 | comment = ui.config(b"gpg", fingerprint) |
|
264 | 263 | if comment: |
|
265 | 264 | return b"%s (%s)" % (user, comment) |
|
266 | 265 | else: |
|
267 | 266 | return user |
|
268 | 267 | |
|
269 | 268 | |
|
270 | 269 | @command( |
|
271 | 270 | b"sign", |
|
272 | 271 | [ |
|
273 | 272 | (b'l', b'local', None, _(b'make the signature local')), |
|
274 | 273 | (b'f', b'force', None, _(b'sign even if the sigfile is modified')), |
|
275 | 274 | ( |
|
276 | 275 | b'', |
|
277 | 276 | b'no-commit', |
|
278 | 277 | None, |
|
279 | 278 | _(b'do not commit the sigfile after signing'), |
|
280 | 279 | ), |
|
281 | 280 | (b'k', b'key', b'', _(b'the key id to sign with'), _(b'ID')), |
|
282 | 281 | (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')), |
|
283 | 282 | (b'e', b'edit', False, _(b'invoke editor on commit messages')), |
|
284 | 283 | ] |
|
285 | 284 | + cmdutil.commitopts2, |
|
286 | 285 | _(b'hg sign [OPTION]... [REV]...'), |
|
287 | 286 | helpcategory=_HELP_CATEGORY, |
|
288 | 287 | ) |
|
289 | 288 | def sign(ui, repo, *revs, **opts): |
|
290 | 289 | """add a signature for the current or given revision |
|
291 | 290 | |
|
292 | 291 | If no revision is given, the parent of the working directory is used, |
|
293 | 292 | or tip if no revision is checked out. |
|
294 | 293 | |
|
295 | 294 | The ``gpg.cmd`` config setting can be used to specify the command |
|
296 | 295 | to run. A default key can be specified with ``gpg.key``. |
|
297 | 296 | |
|
298 | 297 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
299 | 298 | """ |
|
300 | 299 | with repo.wlock(): |
|
301 | 300 | return _dosign(ui, repo, *revs, **opts) |
|
302 | 301 | |
|
303 | 302 | |
|
304 | 303 | def _dosign(ui, repo, *revs, **opts): |
|
305 | 304 | mygpg = newgpg(ui, **opts) |
|
306 | 305 | opts = pycompat.byteskwargs(opts) |
|
307 | 306 | sigver = b"0" |
|
308 | 307 | sigmessage = b"" |
|
309 | 308 | |
|
310 | 309 | date = opts.get(b'date') |
|
311 | 310 | if date: |
|
312 | 311 | opts[b'date'] = dateutil.parsedate(date) |
|
313 | 312 | |
|
314 | 313 | if revs: |
|
315 | 314 | nodes = [repo.lookup(n) for n in revs] |
|
316 | 315 | else: |
|
317 | nodes = [node for node in repo.dirstate.parents() if node != nullid] | |
|
316 | nodes = [ | |
|
317 | node for node in repo.dirstate.parents() if node != repo.nullid | |
|
318 | ] | |
|
318 | 319 | if len(nodes) > 1: |
|
319 | 320 | raise error.Abort( |
|
320 | 321 | _(b'uncommitted merge - please provide a specific revision') |
|
321 | 322 | ) |
|
322 | 323 | if not nodes: |
|
323 | 324 | nodes = [repo.changelog.tip()] |
|
324 | 325 | |
|
325 | 326 | for n in nodes: |
|
326 | 327 | hexnode = hex(n) |
|
327 | 328 | ui.write(_(b"signing %d:%s\n") % (repo.changelog.rev(n), short(n))) |
|
328 | 329 | # build data |
|
329 | 330 | data = node2txt(repo, n, sigver) |
|
330 | 331 | sig = mygpg.sign(data) |
|
331 | 332 | if not sig: |
|
332 | 333 | raise error.Abort(_(b"error while signing")) |
|
333 | 334 | sig = binascii.b2a_base64(sig) |
|
334 | 335 | sig = sig.replace(b"\n", b"") |
|
335 | 336 | sigmessage += b"%s %s %s\n" % (hexnode, sigver, sig) |
|
336 | 337 | |
|
337 | 338 | # write it |
|
338 | 339 | if opts[b'local']: |
|
339 | 340 | repo.vfs.append(b"localsigs", sigmessage) |
|
340 | 341 | return |
|
341 | 342 | |
|
342 | 343 | if not opts[b"force"]: |
|
343 | 344 | msigs = match.exact([b'.hgsigs']) |
|
344 | 345 | if any(repo.status(match=msigs, unknown=True, ignored=True)): |
|
345 | 346 | raise error.Abort( |
|
346 | 347 | _(b"working copy of .hgsigs is changed "), |
|
347 | 348 | hint=_(b"please commit .hgsigs manually"), |
|
348 | 349 | ) |
|
349 | 350 | |
|
350 | 351 | sigsfile = repo.wvfs(b".hgsigs", b"ab") |
|
351 | 352 | sigsfile.write(sigmessage) |
|
352 | 353 | sigsfile.close() |
|
353 | 354 | |
|
354 | 355 | if b'.hgsigs' not in repo.dirstate: |
|
355 | 356 | repo[None].add([b".hgsigs"]) |
|
356 | 357 | |
|
357 | 358 | if opts[b"no_commit"]: |
|
358 | 359 | return |
|
359 | 360 | |
|
360 | 361 | message = opts[b'message'] |
|
361 | 362 | if not message: |
|
362 | 363 | # we don't translate commit messages |
|
363 | 364 | message = b"\n".join( |
|
364 | 365 | [b"Added signature for changeset %s" % short(n) for n in nodes] |
|
365 | 366 | ) |
|
366 | 367 | try: |
|
367 | 368 | editor = cmdutil.getcommiteditor( |
|
368 | 369 | editform=b'gpg.sign', **pycompat.strkwargs(opts) |
|
369 | 370 | ) |
|
370 | 371 | repo.commit( |
|
371 | 372 | message, opts[b'user'], opts[b'date'], match=msigs, editor=editor |
|
372 | 373 | ) |
|
373 | 374 | except ValueError as inst: |
|
374 | 375 | raise error.Abort(pycompat.bytestr(inst)) |
|
375 | 376 | |
|
376 | 377 | |
|
377 | 378 | def node2txt(repo, node, ver): |
|
378 | 379 | """map a manifest into some text""" |
|
379 | 380 | if ver == b"0": |
|
380 | 381 | return b"%s\n" % hex(node) |
|
381 | 382 | else: |
|
382 | 383 | raise error.Abort(_(b"unknown signature version")) |
|
383 | 384 | |
|
384 | 385 | |
|
385 | 386 | def extsetup(ui): |
|
386 | 387 | # Add our category before "Repository maintenance". |
|
387 | 388 | help.CATEGORY_ORDER.insert( |
|
388 | 389 | help.CATEGORY_ORDER.index(command.CATEGORY_MAINTENANCE), _HELP_CATEGORY |
|
389 | 390 | ) |
|
390 | 391 | help.CATEGORY_NAMES[_HELP_CATEGORY] = b'GPG signing' |
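
Note the change in _dosign() above: the parent filter now compares against repo.nullid instead of the module-level nullid, so the sentinel always matches the repository's own node format. A minimal sketch of the updated pattern:

    def real_parents(repo):
        # repo.nullid is the null node for this repo's hash function
        return [p for p in repo.dirstate.parents() if p != repo.nullid]
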
@@ -1,389 +1,388 b'' | |||
|
1 | 1 | # Minimal support for git commands on an hg repository |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''browse the repository in a graphical way |
|
9 | 9 | |
|
10 | 10 | The hgk extension allows browsing the history of a repository in a |
|
11 | 11 | graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not |
|
12 | 12 | distributed with Mercurial.) |
|
13 | 13 | |
|
14 | 14 | hgk consists of two parts: a Tcl script that does the displaying and |
|
15 | 15 | querying of information, and an extension to Mercurial named hgk.py, |
|
16 | 16 | which provides hooks for hgk to get information. hgk can be found in |
|
17 | 17 | the contrib directory, and the extension is shipped in the hgext |
|
18 | 18 | repository, and needs to be enabled. |
|
19 | 19 | |
|
20 | 20 | The :hg:`view` command will launch the hgk Tcl script. For this command |
|
21 | 21 | to work, hgk must be in your search path. Alternately, you can specify |
|
22 | 22 | the path to hgk in your configuration file:: |
|
23 | 23 | |
|
24 | 24 | [hgk] |
|
25 | 25 | path = /location/of/hgk |
|
26 | 26 | |
|
27 | 27 | hgk can make use of the extdiff extension to visualize revisions. |
|
28 | 28 | Assuming you had already configured extdiff vdiff command, just add:: |
|
29 | 29 | |
|
30 | 30 | [hgk] |
|
31 | 31 | vdiff=vdiff |
|
32 | 32 | |
|
33 | 33 | The revision context menu will then display additional entries to fire
|
34 | 34 | vdiff on hovered and selected revisions. |
|
35 | 35 | ''' |
|
36 | 36 | |
|
37 | 37 | from __future__ import absolute_import |
|
38 | 38 | |
|
39 | 39 | import os |
|
40 | 40 | |
|
41 | 41 | from mercurial.i18n import _ |
|
42 | 42 | from mercurial.node import ( |
|
43 | nullid, | |
|
44 | 43 | nullrev, |
|
45 | 44 | short, |
|
46 | 45 | ) |
|
47 | 46 | from mercurial import ( |
|
48 | 47 | commands, |
|
49 | 48 | obsolete, |
|
50 | 49 | patch, |
|
51 | 50 | pycompat, |
|
52 | 51 | registrar, |
|
53 | 52 | scmutil, |
|
54 | 53 | ) |
|
55 | 54 | |
|
56 | 55 | cmdtable = {} |
|
57 | 56 | command = registrar.command(cmdtable) |
|
58 | 57 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
59 | 58 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
60 | 59 | # be specifying the version(s) of Mercurial they are tested with, or |
|
61 | 60 | # leave the attribute unspecified. |
|
62 | 61 | testedwith = b'ships-with-hg-core' |
|
63 | 62 | |
|
64 | 63 | configtable = {} |
|
65 | 64 | configitem = registrar.configitem(configtable) |
|
66 | 65 | |
|
67 | 66 | configitem( |
|
68 | 67 | b'hgk', |
|
69 | 68 | b'path', |
|
70 | 69 | default=b'hgk', |
|
71 | 70 | ) |
|
72 | 71 | |
|
73 | 72 | |
|
74 | 73 | @command( |
|
75 | 74 | b'debug-diff-tree', |
|
76 | 75 | [ |
|
77 | 76 | (b'p', b'patch', None, _(b'generate patch')), |
|
78 | 77 | (b'r', b'recursive', None, _(b'recursive')), |
|
79 | 78 | (b'P', b'pretty', None, _(b'pretty')), |
|
80 | 79 | (b's', b'stdin', None, _(b'stdin')), |
|
81 | 80 | (b'C', b'copy', None, _(b'detect copies')), |
|
82 | 81 | (b'S', b'search', b"", _(b'search')), |
|
83 | 82 | ], |
|
84 | 83 | b'[OPTION]... NODE1 NODE2 [FILE]...', |
|
85 | 84 | inferrepo=True, |
|
86 | 85 | ) |
|
87 | 86 | def difftree(ui, repo, node1=None, node2=None, *files, **opts): |
|
88 | 87 | """diff trees from two commits""" |
|
89 | 88 | |
|
90 | 89 | def __difftree(repo, node1, node2, files=None): |
|
91 | 90 | assert node2 is not None |
|
92 | 91 | if files is None: |
|
93 | 92 | files = [] |
|
94 | 93 | mmap = repo[node1].manifest() |
|
95 | 94 | mmap2 = repo[node2].manifest() |
|
96 | 95 | m = scmutil.match(repo[node1], files) |
|
97 | 96 | st = repo.status(node1, node2, m) |
|
98 | empty = short(nullid) | |
|
97 | empty = short(repo.nullid) | |
|
99 | 98 | |
|
100 | 99 | for f in st.modified: |
|
101 | 100 | # TODO get file permissions |
|
102 | 101 | ui.writenoi18n( |
|
103 | 102 | b":100664 100664 %s %s M\t%s\t%s\n" |
|
104 | 103 | % (short(mmap[f]), short(mmap2[f]), f, f) |
|
105 | 104 | ) |
|
106 | 105 | for f in st.added: |
|
107 | 106 | ui.writenoi18n( |
|
108 | 107 | b":000000 100664 %s %s N\t%s\t%s\n" |
|
109 | 108 | % (empty, short(mmap2[f]), f, f) |
|
110 | 109 | ) |
|
111 | 110 | for f in st.removed: |
|
112 | 111 | ui.writenoi18n( |
|
113 | 112 | b":100664 000000 %s %s D\t%s\t%s\n" |
|
114 | 113 | % (short(mmap[f]), empty, f, f) |
|
115 | 114 | ) |
|
116 | 115 | |
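
For reference, __difftree() above emits git-style raw lines in the form
":<oldmode> <newmode> <oldhash> <newhash> <status>", followed by the two
tab-separated file names; status N marks added files (git would say A), and
the short null hash, twelve zeros, stands in for the missing side. Invented
sample output::

    :100664 100664 1f2e3d4c5b6a 9a8b7c6d5e4f M  sub/file.txt  sub/file.txt
    :000000 100664 000000000000 9a8b7c6d5e4f N  newfile       newfile
    :100664 000000 1f2e3d4c5b6a 000000000000 D  gone.txt      gone.txt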
|
117 | 116 | ## |
|
118 | 117 | |
|
119 | 118 | while True: |
|
120 | 119 | if opts['stdin']: |
|
121 | 120 | line = ui.fin.readline() |
|
122 | 121 | if not line: |
|
123 | 122 | break |
|
124 | 123 | line = line.rstrip(pycompat.oslinesep).split(b' ') |
|
125 | 124 | node1 = line[0] |
|
126 | 125 | if len(line) > 1: |
|
127 | 126 | node2 = line[1] |
|
128 | 127 | else: |
|
129 | 128 | node2 = None |
|
130 | 129 | node1 = repo.lookup(node1) |
|
131 | 130 | if node2: |
|
132 | 131 | node2 = repo.lookup(node2) |
|
133 | 132 | else: |
|
134 | 133 | node2 = node1 |
|
135 | 134 | node1 = repo.changelog.parents(node1)[0] |
|
136 | 135 | if opts['patch']: |
|
137 | 136 | if opts['pretty']: |
|
138 | 137 | catcommit(ui, repo, node2, b"") |
|
139 | 138 | m = scmutil.match(repo[node1], files) |
|
140 | 139 | diffopts = patch.difffeatureopts(ui) |
|
141 | 140 | diffopts.git = True |
|
142 | 141 | chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts) |
|
143 | 142 | for chunk in chunks: |
|
144 | 143 | ui.write(chunk) |
|
145 | 144 | else: |
|
146 | 145 | __difftree(repo, node1, node2, files=files) |
|
147 | 146 | if not opts['stdin']: |
|
148 | 147 | break |
|
149 | 148 | |
|
150 | 149 | |
|
151 | 150 | def catcommit(ui, repo, n, prefix, ctx=None): |
|
152 | 151 | nlprefix = b'\n' + prefix |
|
153 | 152 | if ctx is None: |
|
154 | 153 | ctx = repo[n] |
|
155 | 154 | # use ctx.node() instead ?? |
|
156 | 155 | ui.write((b"tree %s\n" % short(ctx.changeset()[0]))) |
|
157 | 156 | for p in ctx.parents(): |
|
158 | 157 | ui.write((b"parent %s\n" % p)) |
|
159 | 158 | |
|
160 | 159 | date = ctx.date() |
|
161 | 160 | description = ctx.description().replace(b"\0", b"") |
|
162 | 161 | ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1]))) |
|
163 | 162 | |
|
164 | 163 | if b'committer' in ctx.extra(): |
|
165 | 164 | ui.write((b"committer %s\n" % ctx.extra()[b'committer'])) |
|
166 | 165 | |
|
167 | 166 | ui.write((b"revision %d\n" % ctx.rev())) |
|
168 | 167 | ui.write((b"branch %s\n" % ctx.branch())) |
|
169 | 168 | if obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
170 | 169 | if ctx.obsolete(): |
|
171 | 170 | ui.writenoi18n(b"obsolete\n") |
|
172 | 171 | ui.write((b"phase %s\n\n" % ctx.phasestr())) |
|
173 | 172 | |
|
174 | 173 | if prefix != b"": |
|
175 | 174 | ui.write( |
|
176 | 175 | b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip()) |
|
177 | 176 | ) |
|
178 | 177 | else: |
|
179 | 178 | ui.write(description + b"\n") |
|
180 | 179 | if prefix: |
|
181 | 180 | ui.write(b'\0') |
|
182 | 181 | |
|
183 | 182 | |
|
184 | 183 | @command(b'debug-merge-base', [], _(b'REV REV')) |
|
185 | 184 | def base(ui, repo, node1, node2): |
|
186 | 185 | """output common ancestor information""" |
|
187 | 186 | node1 = repo.lookup(node1) |
|
188 | 187 | node2 = repo.lookup(node2) |
|
189 | 188 | n = repo.changelog.ancestor(node1, node2) |
|
190 | 189 | ui.write(short(n) + b"\n") |
|
191 | 190 | |
|
192 | 191 | |
|
193 | 192 | @command( |
|
194 | 193 | b'debug-cat-file', |
|
195 | 194 | [(b's', b'stdin', None, _(b'stdin'))], |
|
196 | 195 | _(b'[OPTION]... TYPE FILE'), |
|
197 | 196 | inferrepo=True, |
|
198 | 197 | ) |
|
199 | 198 | def catfile(ui, repo, type=None, r=None, **opts): |
|
200 | 199 | """cat a specific revision""" |
|
201 | 200 | # in stdin mode, every line except the commit is prefixed with two |
|
202 | 201 | # spaces. This way our caller can find the commit without magic
|
203 | 202 | # strings |
|
204 | 203 | # |
|
205 | 204 | prefix = b"" |
|
206 | 205 | if opts['stdin']: |
|
207 | 206 | line = ui.fin.readline() |
|
208 | 207 | if not line: |
|
209 | 208 | return |
|
210 | 209 | (type, r) = line.rstrip(pycompat.oslinesep).split(b' ') |
|
211 | 210 | prefix = b" " |
|
212 | 211 | else: |
|
213 | 212 | if not type or not r: |
|
214 | 213 | ui.warn(_(b"cat-file: type or revision not supplied\n")) |
|
215 | 214 | commands.help_(ui, b'cat-file') |
|
216 | 215 | |
|
217 | 216 | while r: |
|
218 | 217 | if type != b"commit": |
|
219 | 218 | ui.warn(_(b"aborting: hg cat-file only understands commits\n"))
|
220 | 219 | return 1 |
|
221 | 220 | n = repo.lookup(r) |
|
222 | 221 | catcommit(ui, repo, n, prefix) |
|
223 | 222 | if opts['stdin']: |
|
224 | 223 | line = ui.fin.readline() |
|
225 | 224 | if not line: |
|
226 | 225 | break |
|
227 | 226 | (type, r) = line.rstrip(pycompat.oslinesep).split(b' ') |
|
228 | 227 | else: |
|
229 | 228 | break |
|
230 | 229 | |
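
The --stdin loop above consumes "<type> <rev>" pairs, and only the commit
type is accepted. A sketch of driving it from another Python process,
assuming an hg binary on PATH, byte revision names (e.g. b"tip") and POSIX
line endings, since the command splits input on the OS line separator;
catcommits is an illustrative name::

    import subprocess

    def catcommits(revs):
        # feed "commit <rev>" requests on stdin and return the raw reply;
        # per the comment above, everything except the commit itself is
        # indented so the caller can find commit boundaries
        proc = subprocess.Popen(
            [b"hg", b"debug-cat-file", b"--stdin"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
        out, _err = proc.communicate(
            b"".join(b"commit %s\n" % rev for rev in revs)
        )
        return out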
|
231 | 230 | |
|
232 | 231 | # git rev-tree is a confusing thing. You can supply a number of |
|
233 | 232 | # commit sha1s on the command line, and it walks the commit history |
|
234 | 233 | # telling you which commits are reachable from the supplied ones via |
|
235 | 234 | # a bitmask based on arg position. |
|
236 | 235 | # you can specify a commit to stop at by starting the sha1 with ^ |
|
237 | 236 | def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False): |
|
238 | 237 | def chlogwalk(): |
|
239 | 238 | count = len(repo) |
|
240 | 239 | i = count |
|
241 | 240 | l = [0] * 100 |
|
242 | 241 | chunk = 100 |
|
243 | 242 | while True: |
|
244 | 243 | if chunk > i: |
|
245 | 244 | chunk = i |
|
246 | 245 | i = 0 |
|
247 | 246 | else: |
|
248 | 247 | i -= chunk |
|
249 | 248 | |
|
250 | 249 | for x in pycompat.xrange(chunk): |
|
251 | 250 | if i + x >= count: |
|
252 | 251 | l[chunk - x :] = [0] * (chunk - x) |
|
253 | 252 | break |
|
254 | 253 | if full is not None: |
|
255 | 254 | if (i + x) in repo: |
|
256 | 255 | l[x] = repo[i + x] |
|
257 | 256 | l[x].changeset() # force reading |
|
258 | 257 | else: |
|
259 | 258 | if (i + x) in repo: |
|
260 | 259 | l[x] = 1 |
|
261 | 260 | for x in pycompat.xrange(chunk - 1, -1, -1): |
|
262 | 261 | if l[x] != 0: |
|
263 | 262 | yield (i + x, full is not None and l[x] or None) |
|
264 | 263 | if i == 0: |
|
265 | 264 | break |
|
266 | 265 | |
|
267 | 266 | # calculate and return the reachability bitmask for sha |
|
268 | 267 | def is_reachable(ar, reachable, sha): |
|
269 | 268 | if len(ar) == 0: |
|
270 | 269 | return 1 |
|
271 | 270 | mask = 0 |
|
272 | 271 | for i in pycompat.xrange(len(ar)): |
|
273 | 272 | if sha in reachable[i]: |
|
274 | 273 | mask |= 1 << i |
|
275 | 274 | |
|
276 | 275 | return mask |
|
277 | 276 | |
|
278 | 277 | reachable = [] |
|
279 | 278 | stop_sha1 = [] |
|
280 | 279 | want_sha1 = [] |
|
281 | 280 | count = 0 |
|
282 | 281 | |
|
283 | 282 | # figure out which commits they are asking for and which ones they |
|
284 | 283 | # want us to stop on |
|
285 | 284 | for i, arg in enumerate(args): |
|
286 | 285 | if arg.startswith(b'^'): |
|
287 | 286 | s = repo.lookup(arg[1:]) |
|
288 | 287 | stop_sha1.append(s) |
|
289 | 288 | want_sha1.append(s) |
|
290 | 289 | elif arg != b'HEAD': |
|
291 | 290 | want_sha1.append(repo.lookup(arg)) |
|
292 | 291 | |
|
293 | 292 | # calculate the graph for the supplied commits |
|
294 | 293 | for i, n in enumerate(want_sha1): |
|
295 | 294 | reachable.append(set()) |
|
296 | 295 | visit = [n] |
|
297 | 296 | reachable[i].add(n) |
|
298 | 297 | while visit: |
|
299 | 298 | n = visit.pop(0) |
|
300 | 299 | if n in stop_sha1: |
|
301 | 300 | continue |
|
302 | 301 | for p in repo.changelog.parents(n): |
|
303 | 302 | if p not in reachable[i]: |
|
304 | 303 | reachable[i].add(p) |
|
305 | 304 | visit.append(p) |
|
306 | 305 | if p in stop_sha1: |
|
307 | 306 | continue |
|
308 | 307 | |
|
309 | 308 | # walk the repository looking for commits that are in our |
|
310 | 309 | # reachability graph |
|
311 | 310 | for i, ctx in chlogwalk(): |
|
312 | 311 | if i not in repo: |
|
313 | 312 | continue |
|
314 | 313 | n = repo.changelog.node(i) |
|
315 | 314 | mask = is_reachable(want_sha1, reachable, n) |
|
316 | 315 | if mask: |
|
317 | 316 | parentstr = b"" |
|
318 | 317 | if parents: |
|
319 | 318 | pp = repo.changelog.parents(n) |
|
320 | if pp[0] != nullid: | |
|
319 | if pp[0] != repo.nullid: | |
|
321 | 320 | parentstr += b" " + short(pp[0]) |
|
322 | if pp[1] != nullid: | |
|
321 | if pp[1] != repo.nullid: | |
|
323 | 322 | parentstr += b" " + short(pp[1]) |
|
324 | 323 | if not full: |
|
325 | 324 | ui.write(b"%s%s\n" % (short(n), parentstr)) |
|
326 | 325 | elif full == b"commit": |
|
327 | 326 | ui.write(b"%s%s\n" % (short(n), parentstr)) |
|
328 | 327 | catcommit(ui, repo, n, b' ', ctx) |
|
329 | 328 | else: |
|
330 | 329 | (p1, p2) = repo.changelog.parents(n) |
|
331 | 330 | (h, h1, h2) = map(short, (n, p1, p2)) |
|
332 | 331 | (i1, i2) = map(repo.changelog.rev, (p1, p2)) |
|
333 | 332 | |
|
334 | 333 | date = ctx.date()[0] |
|
335 | 334 | ui.write(b"%s %s:%s" % (date, h, mask)) |
|
336 | 335 | mask = is_reachable(want_sha1, reachable, p1) |
|
337 | 336 | if i1 != nullrev and mask > 0: |
|
338 | 337 | ui.write(b"%s:%s " % (h1, mask)), |
|
339 | 338 | mask = is_reachable(want_sha1, reachable, p2) |
|
340 | 339 | if i2 != nullrev and mask > 0: |
|
341 | 340 | ui.write(b"%s:%s " % (h2, mask)) |
|
342 | 341 | ui.write(b"\n") |
|
343 | 342 | if maxnr and count >= maxnr: |
|
344 | 343 | break |
|
345 | 344 | count += 1 |
|
346 | 345 | |
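
The bitmask is the heart of revtree(): bit i of a commit's mask is set when
the commit is reachable from the i-th requested commit. A toy reproduction
of is_reachable() with invented sets, runnable on its own::

    def is_reachable(ar, reachable, sha):
        if len(ar) == 0:
            return 1
        mask = 0
        for i in range(len(ar)):
            if sha in reachable[i]:
                mask |= 1 << i
        return mask

    reachable = [{"a", "b"}, {"b", "c"}]
    assert is_reachable(["a", "c"], reachable, "b") == 0b11  # in both sets
    assert is_reachable(["a", "c"], reachable, "c") == 0b10  # second set only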
|
347 | 346 | |
|
348 | 347 | # git rev-list tries to order things by date, and has the ability to stop |
|
349 | 348 | # at a given commit without walking the whole repo. TODO add the stop |
|
350 | 349 | # parameter |
|
351 | 350 | @command( |
|
352 | 351 | b'debug-rev-list', |
|
353 | 352 | [ |
|
354 | 353 | (b'H', b'header', None, _(b'header')), |
|
355 | 354 | (b't', b'topo-order', None, _(b'topo-order')), |
|
356 | 355 | (b'p', b'parents', None, _(b'parents')), |
|
357 | 356 | (b'n', b'max-count', 0, _(b'max-count')), |
|
358 | 357 | ], |
|
359 | 358 | b'[OPTION]... REV...', |
|
360 | 359 | ) |
|
361 | 360 | def revlist(ui, repo, *revs, **opts): |
|
362 | 361 | """print revisions""" |
|
363 | 362 | if opts['header']: |
|
364 | 363 | full = b"commit" |
|
365 | 364 | else: |
|
366 | 365 | full = None |
|
367 | 366 | copy = [x for x in revs] |
|
368 | 367 | revtree(ui, copy, repo, full, opts['max_count'], opts[r'parents']) |
|
369 | 368 | |
|
370 | 369 | |
|
371 | 370 | @command( |
|
372 | 371 | b'view', |
|
373 | 372 | [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))], |
|
374 | 373 | _(b'[-l LIMIT] [REVRANGE]'), |
|
375 | 374 | helpcategory=command.CATEGORY_CHANGE_NAVIGATION, |
|
376 | 375 | ) |
|
377 | 376 | def view(ui, repo, *etc, **opts): |
|
378 | 377 | """start interactive history viewer""" |
|
379 | 378 | opts = pycompat.byteskwargs(opts) |
|
380 | 379 | os.chdir(repo.root) |
|
381 | 380 | optstr = b' '.join( |
|
382 | 381 | [b'--%s %s' % (k, v) for k, v in pycompat.iteritems(opts) if v] |
|
383 | 382 | ) |
|
384 | 383 | if repo.filtername is None: |
|
385 | 384 | optstr += b'--hidden' |
|
386 | 385 | |
|
387 | 386 | cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc)) |
|
388 | 387 | ui.debug(b"running %s\n" % cmd) |
|
389 | 388 | ui.system(cmd, blockedtag=b'hgk_view') |
@@ -1,608 +1,607 b'' | |||
|
1 | 1 | # journal.py |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2014-2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """track previous positions of bookmarks (EXPERIMENTAL) |
|
8 | 8 | |
|
9 | 9 | This extension adds a new command: `hg journal`, which shows you where |
|
10 | 10 | bookmarks were previously located. |
|
11 | 11 | |
|
12 | 12 | """ |
|
13 | 13 | |
|
14 | 14 | from __future__ import absolute_import |
|
15 | 15 | |
|
16 | 16 | import collections |
|
17 | 17 | import errno |
|
18 | 18 | import os |
|
19 | 19 | import weakref |
|
20 | 20 | |
|
21 | 21 | from mercurial.i18n import _ |
|
22 | 22 | from mercurial.node import ( |
|
23 | 23 | bin, |
|
24 | 24 | hex, |
|
25 | nullid, | |
|
26 | 25 | ) |
|
27 | 26 | |
|
28 | 27 | from mercurial import ( |
|
29 | 28 | bookmarks, |
|
30 | 29 | cmdutil, |
|
31 | 30 | dispatch, |
|
32 | 31 | encoding, |
|
33 | 32 | error, |
|
34 | 33 | extensions, |
|
35 | 34 | hg, |
|
36 | 35 | localrepo, |
|
37 | 36 | lock, |
|
38 | 37 | logcmdutil, |
|
39 | 38 | pycompat, |
|
40 | 39 | registrar, |
|
41 | 40 | util, |
|
42 | 41 | ) |
|
43 | 42 | from mercurial.utils import ( |
|
44 | 43 | dateutil, |
|
45 | 44 | procutil, |
|
46 | 45 | stringutil, |
|
47 | 46 | ) |
|
48 | 47 | |
|
49 | 48 | cmdtable = {} |
|
50 | 49 | command = registrar.command(cmdtable) |
|
51 | 50 | |
|
52 | 51 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
53 | 52 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
54 | 53 | # be specifying the version(s) of Mercurial they are tested with, or |
|
55 | 54 | # leave the attribute unspecified. |
|
56 | 55 | testedwith = b'ships-with-hg-core' |
|
57 | 56 | |
|
58 | 57 | # storage format version; increment when the format changes |
|
59 | 58 | storageversion = 0 |
|
60 | 59 | |
|
61 | 60 | # namespaces |
|
62 | 61 | bookmarktype = b'bookmark' |
|
63 | 62 | wdirparenttype = b'wdirparent' |
|
64 | 63 | # In a shared repository, what shared feature name is used |
|
65 | 64 | # to indicate this namespace is shared with the source? |
|
66 | 65 | sharednamespaces = { |
|
67 | 66 | bookmarktype: hg.sharedbookmarks, |
|
68 | 67 | } |
|
69 | 68 | |
|
70 | 69 | # Journal recording, register hooks and storage object |
|
71 | 70 | def extsetup(ui): |
|
72 | 71 | extensions.wrapfunction(dispatch, b'runcommand', runcommand) |
|
73 | 72 | extensions.wrapfunction(bookmarks.bmstore, b'_write', recordbookmarks) |
|
74 | 73 | extensions.wrapfilecache( |
|
75 | 74 | localrepo.localrepository, b'dirstate', wrapdirstate |
|
76 | 75 | ) |
|
77 | 76 | extensions.wrapfunction(hg, b'postshare', wrappostshare) |
|
78 | 77 | extensions.wrapfunction(hg, b'copystore', unsharejournal) |
|
79 | 78 | |
|
80 | 79 | |
|
81 | 80 | def reposetup(ui, repo): |
|
82 | 81 | if repo.local(): |
|
83 | 82 | repo.journal = journalstorage(repo) |
|
84 | 83 | repo._wlockfreeprefix.add(b'namejournal') |
|
85 | 84 | |
|
86 | 85 | dirstate, cached = localrepo.isfilecached(repo, b'dirstate') |
|
87 | 86 | if cached: |
|
88 | 87 | # already instantiated dirstate isn't yet marked as |
|
89 | 88 | # "journal"-ing, even though repo.dirstate() was already |
|
90 | 89 | # wrapped by own wrapdirstate() |
|
91 | 90 | _setupdirstate(repo, dirstate) |
|
92 | 91 | |
|
93 | 92 | |
|
94 | 93 | def runcommand(orig, lui, repo, cmd, fullargs, *args): |
|
95 | 94 | """Track the command line options for recording in the journal""" |
|
96 | 95 | journalstorage.recordcommand(*fullargs) |
|
97 | 96 | return orig(lui, repo, cmd, fullargs, *args) |
|
98 | 97 | |
|
99 | 98 | |
|
100 | 99 | def _setupdirstate(repo, dirstate): |
|
101 | 100 | dirstate.journalstorage = repo.journal |
|
102 | 101 | dirstate.addparentchangecallback(b'journal', recorddirstateparents) |
|
103 | 102 | |
|
104 | 103 | |
|
105 | 104 | # hooks to record dirstate changes |
|
106 | 105 | def wrapdirstate(orig, repo): |
|
107 | 106 | """Make journal storage available to the dirstate object""" |
|
108 | 107 | dirstate = orig(repo) |
|
109 | 108 | if util.safehasattr(repo, 'journal'): |
|
110 | 109 | _setupdirstate(repo, dirstate) |
|
111 | 110 | return dirstate |
|
112 | 111 | |
|
113 | 112 | |
|
114 | 113 | def recorddirstateparents(dirstate, old, new): |
|
115 | 114 | """Records all dirstate parent changes in the journal.""" |
|
116 | 115 | old = list(old) |
|
117 | 116 | new = list(new) |
|
118 | 117 | if util.safehasattr(dirstate, 'journalstorage'): |
|
119 | 118 | # only record two hashes if there was a merge |
|
120 | oldhashes = old[:1] if old[1] == nullid else old | |
|
121 | newhashes = new[:1] if new[1] == nullid else new | |
|
119 | oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old | |
|
120 | newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new | |
|
122 | 121 | dirstate.journalstorage.record( |
|
123 | 122 | wdirparenttype, b'.', oldhashes, newhashes |
|
124 | 123 | ) |
|
125 | 124 | |
|
126 | 125 | |
|
127 | 126 | # hooks to record bookmark changes (both local and remote) |
|
128 | 127 | def recordbookmarks(orig, store, fp): |
|
129 | 128 | """Records all bookmark changes in the journal.""" |
|
130 | 129 | repo = store._repo |
|
131 | 130 | if util.safehasattr(repo, 'journal'): |
|
132 | 131 | oldmarks = bookmarks.bmstore(repo) |
|
133 | 132 | for mark, value in pycompat.iteritems(store): |
|
134 | oldvalue = oldmarks.get(mark, nullid) | |
|
133 | oldvalue = oldmarks.get(mark, repo.nullid) | |
|
135 | 134 | if value != oldvalue: |
|
136 | 135 | repo.journal.record(bookmarktype, mark, oldvalue, value) |
|
137 | 136 | return orig(store, fp) |
|
138 | 137 | |
|
139 | 138 | |
|
140 | 139 | # shared repository support |
|
141 | 140 | def _readsharedfeatures(repo): |
|
142 | 141 | """A set of shared features for this repository""" |
|
143 | 142 | try: |
|
144 | 143 | return set(repo.vfs.read(b'shared').splitlines()) |
|
145 | 144 | except IOError as inst: |
|
146 | 145 | if inst.errno != errno.ENOENT: |
|
147 | 146 | raise |
|
148 | 147 | return set() |
|
149 | 148 | |
|
150 | 149 | |
|
151 | 150 | def _mergeentriesiter(*iterables, **kwargs): |
|
152 | 151 | """Given a set of sorted iterables, yield the next entry in merged order |
|
153 | 152 | |
|
154 | 153 | Note that by default entries go from most recent to oldest. |
|
155 | 154 | """ |
|
156 | 155 | order = kwargs.pop('order', max) |
|
157 | 156 | iterables = [iter(it) for it in iterables] |
|
158 | 157 | # this tracks still active iterables; iterables are deleted as they are |
|
159 | 158 | # exhausted, which is why this is a dictionary and why each entry also |
|
160 | 159 | # stores the key. Entries are mutable so we can store the next value each |
|
161 | 160 | # time. |
|
162 | 161 | iterable_map = {} |
|
163 | 162 | for key, it in enumerate(iterables): |
|
164 | 163 | try: |
|
165 | 164 | iterable_map[key] = [next(it), key, it] |
|
166 | 165 | except StopIteration: |
|
167 | 166 | # empty entry, can be ignored |
|
168 | 167 | pass |
|
169 | 168 | |
|
170 | 169 | while iterable_map: |
|
171 | 170 | value, key, it = order(pycompat.itervalues(iterable_map)) |
|
172 | 171 | yield value |
|
173 | 172 | try: |
|
174 | 173 | iterable_map[key][0] = next(it) |
|
175 | 174 | except StopIteration: |
|
176 | 175 | # this iterable is empty, remove it from consideration |
|
177 | 176 | del iterable_map[key] |
|
178 | 177 | |
|
179 | 178 | |
|
180 | 179 | def wrappostshare(orig, sourcerepo, destrepo, **kwargs): |
|
181 | 180 | """Mark this shared working copy as sharing journal information""" |
|
182 | 181 | with destrepo.wlock(): |
|
183 | 182 | orig(sourcerepo, destrepo, **kwargs) |
|
184 | 183 | with destrepo.vfs(b'shared', b'a') as fp: |
|
185 | 184 | fp.write(b'journal\n') |
|
186 | 185 | |
|
187 | 186 | |
|
188 | 187 | def unsharejournal(orig, ui, repo, repopath): |
|
189 | 188 | """Copy shared journal entries into this repo when unsharing""" |
|
190 | 189 | if ( |
|
191 | 190 | repo.path == repopath |
|
192 | 191 | and repo.shared() |
|
193 | 192 | and util.safehasattr(repo, 'journal') |
|
194 | 193 | ): |
|
195 | 194 | sharedrepo = hg.sharedreposource(repo) |
|
196 | 195 | sharedfeatures = _readsharedfeatures(repo) |
|
197 | 196 | if sharedrepo and sharedfeatures > {b'journal'}: |
|
198 | 197 | # there is a shared repository and there are shared journal entries |
|
199 | 198 | # to copy. move shared date over from source to destination but |
|
200 | 199 | # move the local file first |
|
201 | 200 | if repo.vfs.exists(b'namejournal'): |
|
202 | 201 | journalpath = repo.vfs.join(b'namejournal') |
|
203 | 202 | util.rename(journalpath, journalpath + b'.bak') |
|
204 | 203 | storage = repo.journal |
|
205 | 204 | local = storage._open( |
|
206 | 205 | repo.vfs, filename=b'namejournal.bak', _newestfirst=False |
|
207 | 206 | ) |
|
208 | 207 | shared = ( |
|
209 | 208 | e |
|
210 | 209 | for e in storage._open(sharedrepo.vfs, _newestfirst=False) |
|
211 | 210 | if sharednamespaces.get(e.namespace) in sharedfeatures |
|
212 | 211 | ) |
|
213 | 212 | for entry in _mergeentriesiter(local, shared, order=min): |
|
214 | 213 | storage._write(repo.vfs, entry) |
|
215 | 214 | |
|
216 | 215 | return orig(ui, repo, repopath) |
|
217 | 216 | |
|
218 | 217 | |
|
219 | 218 | class journalentry( |
|
220 | 219 | collections.namedtuple( |
|
221 | 220 | 'journalentry', |
|
222 | 221 | 'timestamp user command namespace name oldhashes newhashes', |
|
223 | 222 | ) |
|
224 | 223 | ): |
|
225 | 224 | """Individual journal entry |
|
226 | 225 | |
|
227 | 226 | * timestamp: a mercurial (time, timezone) tuple |
|
228 | 227 | * user: the username that ran the command |
|
229 | 228 | * namespace: the entry namespace, an opaque string |
|
230 | 229 | * name: the name of the changed item, opaque string with meaning in the |
|
231 | 230 | namespace |
|
232 | 231 | * command: the hg command that triggered this record |
|
233 | 232 | * oldhashes: a tuple of one or more binary hashes for the old location |
|
234 | 233 | * newhashes: a tuple of one or more binary hashes for the new location |
|
235 | 234 | |
|
236 | 235 | Handles serialisation from and to the storage format. Fields are |
|
237 | 236 | separated by newlines, hashes are written out in hex separated by commas, |
|
238 | 237 | timestamp and timezone are separated by a space. |
|
239 | 238 | |
|
240 | 239 | """ |
|
241 | 240 | |
|
242 | 241 | @classmethod |
|
243 | 242 | def fromstorage(cls, line): |
|
244 | 243 | ( |
|
245 | 244 | time, |
|
246 | 245 | user, |
|
247 | 246 | command, |
|
248 | 247 | namespace, |
|
249 | 248 | name, |
|
250 | 249 | oldhashes, |
|
251 | 250 | newhashes, |
|
252 | 251 | ) = line.split(b'\n') |
|
253 | 252 | timestamp, tz = time.split() |
|
254 | 253 | timestamp, tz = float(timestamp), int(tz) |
|
255 | 254 | oldhashes = tuple(bin(hash) for hash in oldhashes.split(b',')) |
|
256 | 255 | newhashes = tuple(bin(hash) for hash in newhashes.split(b',')) |
|
257 | 256 | return cls( |
|
258 | 257 | (timestamp, tz), |
|
259 | 258 | user, |
|
260 | 259 | command, |
|
261 | 260 | namespace, |
|
262 | 261 | name, |
|
263 | 262 | oldhashes, |
|
264 | 263 | newhashes, |
|
265 | 264 | ) |
|
266 | 265 | |
|
267 | 266 | def __bytes__(self): |
|
268 | 267 | """bytes representation for storage""" |
|
269 | 268 | time = b' '.join(map(pycompat.bytestr, self.timestamp)) |
|
270 | 269 | oldhashes = b','.join([hex(hash) for hash in self.oldhashes]) |
|
271 | 270 | newhashes = b','.join([hex(hash) for hash in self.newhashes]) |
|
272 | 271 | return b'\n'.join( |
|
273 | 272 | ( |
|
274 | 273 | time, |
|
275 | 274 | self.user, |
|
276 | 275 | self.command, |
|
277 | 276 | self.namespace, |
|
278 | 277 | self.name, |
|
279 | 278 | oldhashes, |
|
280 | 279 | newhashes, |
|
281 | 280 | ) |
|
282 | 281 | ) |
|
283 | 282 | |
|
284 | 283 | __str__ = encoding.strmethod(__bytes__) |
|
285 | 284 | |
|
286 | 285 | |
|
287 | 286 | class journalstorage(object): |
|
288 | 287 | """Storage for journal entries |
|
289 | 288 | |
|
290 | 289 | Entries are divided over two files; one with entries that pertain to the |
|
291 | 290 | local working copy *only*, and one with entries that are shared across |
|
292 | 291 | multiple working copies when shared using the share extension. |
|
293 | 292 | |
|
294 | 293 | Entries are stored with NUL bytes as separators. See the journalentry |
|
295 | 294 | class for the per-entry structure. |
|
296 | 295 | |
|
297 | 296 | The file format starts with an integer version, delimited by a NUL. |
|
298 | 297 | |
|
299 | 298 | This storage uses a dedicated lock; this makes it easier to avoid issues |
|
300 | 299 | with adding entries that added when the regular wlock is unlocked (e.g. |
|
301 | 300 | the dirstate). |
|
302 | 301 | |
|
303 | 302 | """ |
|
304 | 303 | |
|
305 | 304 | _currentcommand = () |
|
306 | 305 | _lockref = None |
|
307 | 306 | |
|
308 | 307 | def __init__(self, repo): |
|
309 | 308 | self.user = procutil.getuser() |
|
310 | 309 | self.ui = repo.ui |
|
311 | 310 | self.vfs = repo.vfs |
|
312 | 311 | |
|
313 | 312 | # is this working copy using a shared storage? |
|
314 | 313 | self.sharedfeatures = self.sharedvfs = None |
|
315 | 314 | if repo.shared(): |
|
316 | 315 | features = _readsharedfeatures(repo) |
|
317 | 316 | sharedrepo = hg.sharedreposource(repo) |
|
318 | 317 | if sharedrepo is not None and b'journal' in features: |
|
319 | 318 | self.sharedvfs = sharedrepo.vfs |
|
320 | 319 | self.sharedfeatures = features |
|
321 | 320 | |
|
322 | 321 | # track the current command for recording in journal entries |
|
323 | 322 | @property |
|
324 | 323 | def command(self): |
|
325 | 324 | commandstr = b' '.join( |
|
326 | 325 | map(procutil.shellquote, journalstorage._currentcommand) |
|
327 | 326 | ) |
|
328 | 327 | if b'\n' in commandstr: |
|
329 | 328 | # truncate multi-line commands |
|
330 | 329 | commandstr = commandstr.partition(b'\n')[0] + b' ...' |
|
331 | 330 | return commandstr |
|
332 | 331 | |
|
333 | 332 | @classmethod |
|
334 | 333 | def recordcommand(cls, *fullargs): |
|
335 | 334 | """Set the current hg arguments, stored with recorded entries""" |
|
336 | 335 | # Set the current command on the class because we may have started |
|
337 | 336 | # with a non-local repo (cloning for example). |
|
338 | 337 | cls._currentcommand = fullargs |
|
339 | 338 | |
|
340 | 339 | def _currentlock(self, lockref): |
|
341 | 340 | """Returns the lock if it's held, or None if it's not. |
|
342 | 341 | |
|
343 | 342 | (This is copied from the localrepo class) |
|
344 | 343 | """ |
|
345 | 344 | if lockref is None: |
|
346 | 345 | return None |
|
347 | 346 | l = lockref() |
|
348 | 347 | if l is None or not l.held: |
|
349 | 348 | return None |
|
350 | 349 | return l |
|
351 | 350 | |
|
352 | 351 | def jlock(self, vfs): |
|
353 | 352 | """Create a lock for the journal file""" |
|
354 | 353 | if self._currentlock(self._lockref) is not None: |
|
355 | 354 | raise error.Abort(_(b'journal lock does not support nesting')) |
|
356 | 355 | desc = _(b'journal of %s') % vfs.base |
|
357 | 356 | try: |
|
358 | 357 | l = lock.lock(vfs, b'namejournal.lock', 0, desc=desc) |
|
359 | 358 | except error.LockHeld as inst: |
|
360 | 359 | self.ui.warn( |
|
361 | 360 | _(b"waiting for lock on %s held by %r\n") % (desc, inst.locker) |
|
362 | 361 | ) |
|
363 | 362 | # default to 600 seconds timeout |
|
364 | 363 | l = lock.lock( |
|
365 | 364 | vfs, |
|
366 | 365 | b'namejournal.lock', |
|
367 | 366 | self.ui.configint(b"ui", b"timeout"), |
|
368 | 367 | desc=desc, |
|
369 | 368 | ) |
|
370 | 369 | self.ui.warn(_(b"got lock after %s seconds\n") % l.delay) |
|
371 | 370 | self._lockref = weakref.ref(l) |
|
372 | 371 | return l |
|
373 | 372 | |
|
374 | 373 | def record(self, namespace, name, oldhashes, newhashes): |
|
375 | 374 | """Record a new journal entry |
|
376 | 375 | |
|
377 | 376 | * namespace: an opaque string; this can be used to filter on the type |
|
378 | 377 | of recorded entries. |
|
379 | 378 | * name: the name defining this entry; for bookmarks, this is the |
|
380 | 379 | bookmark name. Can be filtered on when retrieving entries. |
|
381 | 380 | * oldhashes and newhashes: each a single binary hash, or a list of |
|
382 | 381 | binary hashes. These represent the old and new position of the named |
|
383 | 382 | item. |
|
384 | 383 | |
|
385 | 384 | """ |
|
386 | 385 | if not isinstance(oldhashes, list): |
|
387 | 386 | oldhashes = [oldhashes] |
|
388 | 387 | if not isinstance(newhashes, list): |
|
389 | 388 | newhashes = [newhashes] |
|
390 | 389 | |
|
391 | 390 | entry = journalentry( |
|
392 | 391 | dateutil.makedate(), |
|
393 | 392 | self.user, |
|
394 | 393 | self.command, |
|
395 | 394 | namespace, |
|
396 | 395 | name, |
|
397 | 396 | oldhashes, |
|
398 | 397 | newhashes, |
|
399 | 398 | ) |
|
400 | 399 | |
|
401 | 400 | vfs = self.vfs |
|
402 | 401 | if self.sharedvfs is not None: |
|
403 | 402 | # write to the shared repository if this feature is being |
|
404 | 403 | # shared between working copies. |
|
405 | 404 | if sharednamespaces.get(namespace) in self.sharedfeatures: |
|
406 | 405 | vfs = self.sharedvfs |
|
407 | 406 | |
|
408 | 407 | self._write(vfs, entry) |
|
409 | 408 | |
|
410 | 409 | def _write(self, vfs, entry): |
|
411 | 410 | with self.jlock(vfs): |
|
412 | 411 | # open file in amend mode to ensure it is created if missing |
|
413 | 412 | with vfs(b'namejournal', mode=b'a+b') as f: |
|
414 | 413 | f.seek(0, os.SEEK_SET) |
|
415 | 414 | # Read just enough bytes to get a version number (up to 2 |
|
416 | 415 | # digits plus separator) |
|
417 | 416 | version = f.read(3).partition(b'\0')[0] |
|
418 | 417 | if version and version != b"%d" % storageversion: |
|
419 | 418 | # different version of the storage. Exit early (and not |
|
420 | 419 | # write anything) if this is not a version we can handle or |
|
421 | 420 | # the file is corrupt. In future, perhaps rotate the file |
|
422 | 421 | # instead? |
|
423 | 422 | self.ui.warn( |
|
424 | 423 | _(b"unsupported journal file version '%s'\n") % version |
|
425 | 424 | ) |
|
426 | 425 | return |
|
427 | 426 | if not version: |
|
428 | 427 | # empty file, write version first |
|
429 | 428 | f.write((b"%d" % storageversion) + b'\0') |
|
430 | 429 | f.seek(0, os.SEEK_END) |
|
431 | 430 | f.write(bytes(entry) + b'\0') |
|
432 | 431 | |
|
433 | 432 | def filtered(self, namespace=None, name=None): |
|
434 | 433 | """Yield all journal entries with the given namespace or name |
|
435 | 434 | |
|
436 | 435 | Both the namespace and the name are optional; if neither is given all |
|
437 | 436 | entries in the journal are produced. |
|
438 | 437 | |
|
439 | 438 | Matching supports regular expressions by using the `re:` prefix |
|
440 | 439 | (use `literal:` to match names or namespaces that start with `re:`) |
|
441 | 440 | |
|
442 | 441 | """ |
|
443 | 442 | if namespace is not None: |
|
444 | 443 | namespace = stringutil.stringmatcher(namespace)[-1] |
|
445 | 444 | if name is not None: |
|
446 | 445 | name = stringutil.stringmatcher(name)[-1] |
|
447 | 446 | for entry in self: |
|
448 | 447 | if namespace is not None and not namespace(entry.namespace): |
|
449 | 448 | continue |
|
450 | 449 | if name is not None and not name(entry.name): |
|
451 | 450 | continue |
|
452 | 451 | yield entry |
|
453 | 452 | |
|
454 | 453 | def __iter__(self): |
|
455 | 454 | """Iterate over the storage |
|
456 | 455 | |
|
457 | 456 | Yields journalentry instances for each contained journal record. |
|
458 | 457 | |
|
459 | 458 | """ |
|
460 | 459 | local = self._open(self.vfs) |
|
461 | 460 | |
|
462 | 461 | if self.sharedvfs is None: |
|
463 | 462 | return local |
|
464 | 463 | |
|
465 | 464 | # iterate over both local and shared entries, but only those |
|
466 | 465 | # shared entries that are among the currently shared features |
|
467 | 466 | shared = ( |
|
468 | 467 | e |
|
469 | 468 | for e in self._open(self.sharedvfs) |
|
470 | 469 | if sharednamespaces.get(e.namespace) in self.sharedfeatures |
|
471 | 470 | ) |
|
472 | 471 | return _mergeentriesiter(local, shared) |
|
473 | 472 | |
|
474 | 473 | def _open(self, vfs, filename=b'namejournal', _newestfirst=True): |
|
475 | 474 | if not vfs.exists(filename): |
|
476 | 475 | return |
|
477 | 476 | |
|
478 | 477 | with vfs(filename) as f: |
|
479 | 478 | raw = f.read() |
|
480 | 479 | |
|
481 | 480 | lines = raw.split(b'\0') |
|
482 | 481 | version = lines and lines[0] |
|
483 | 482 | if version != b"%d" % storageversion: |
|
484 | 483 | version = version or _(b'not available') |
|
485 | 484 | raise error.Abort(_(b"unknown journal file version '%s'") % version) |
|
486 | 485 | |
|
487 | 486 | # Skip the first line, it's a version number. Normally we iterate over |
|
488 | 487 | # these in reverse order to list newest first; only when copying across |
|
489 | 488 | # a shared storage do we forgo reversing. |
|
490 | 489 | lines = lines[1:] |
|
491 | 490 | if _newestfirst: |
|
492 | 491 | lines = reversed(lines) |
|
493 | 492 | for line in lines: |
|
494 | 493 | if not line: |
|
495 | 494 | continue |
|
496 | 495 | yield journalentry.fromstorage(line) |
|
497 | 496 | |
|
498 | 497 | |
|
499 | 498 | # journal reading |
|
500 | 499 | # log options that don't make sense for journal |
|
501 | 500 | _ignoreopts = (b'no-merges', b'graph') |
|
502 | 501 | |
|
503 | 502 | |
|
504 | 503 | @command( |
|
505 | 504 | b'journal', |
|
506 | 505 | [ |
|
507 | 506 | (b'', b'all', None, b'show history for all names'), |
|
508 | 507 | (b'c', b'commits', None, b'show commit metadata'), |
|
509 | 508 | ] |
|
510 | 509 | + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts], |
|
511 | 510 | b'[OPTION]... [BOOKMARKNAME]', |
|
512 | 511 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
513 | 512 | ) |
|
514 | 513 | def journal(ui, repo, *args, **opts): |
|
515 | 514 | """show the previous position of bookmarks and the working copy |
|
516 | 515 | |
|
517 | 516 | The journal is used to see the previous commits that bookmarks and the |
|
518 | 517 | working copy pointed to. By default the previous locations for the working |
|
519 | 518 | copy. Passing a bookmark name will show all the previous positions of |
|
520 | 519 | that bookmark. Use the --all switch to show previous locations for all |
|
521 | 520 | bookmarks and the working copy; each line will then include the bookmark |
|
522 | 521 | name, or '.' for the working copy, as well. |
|
523 | 522 | |
|
524 | 523 | If `name` starts with `re:`, the remainder of the name is treated as |
|
525 | 524 | a regular expression. To match a name that actually starts with `re:`, |
|
526 | 525 | use the prefix `literal:`. |
|
527 | 526 | |
|
528 | 527 | By default hg journal only shows the commit hash and the command that was |
|
529 | 528 | running at that time. -v/--verbose will show the prior hash, the user, and |
|
530 | 529 | the time at which it happened. |
|
531 | 530 | |
|
532 | 531 | Use -c/--commits to output log information on each commit hash; at this |
|
533 | 532 | point you can use the usual `--patch`, `--git`, `--stat` and `--template` |
|
534 | 533 | switches to alter the log output for these. |
|
535 | 534 | |
|
536 | 535 | `hg journal -T json` can be used to produce machine readable output. |
|
537 | 536 | |
|
538 | 537 | """ |
|
539 | 538 | opts = pycompat.byteskwargs(opts) |
|
540 | 539 | name = b'.' |
|
541 | 540 | if opts.get(b'all'): |
|
542 | 541 | if args: |
|
543 | 542 | raise error.Abort( |
|
544 | 543 | _(b"You can't combine --all and filtering on a name") |
|
545 | 544 | ) |
|
546 | 545 | name = None |
|
547 | 546 | if args: |
|
548 | 547 | name = args[0] |
|
549 | 548 | |
|
550 | 549 | fm = ui.formatter(b'journal', opts) |
|
551 | 550 | |
|
552 | 551 | def formatnodes(nodes): |
|
553 | 552 | return fm.formatlist(map(fm.hexfunc, nodes), name=b'node', sep=b',') |
|
554 | 553 | |
|
555 | 554 | if opts.get(b"template") != b"json": |
|
556 | 555 | if name is None: |
|
557 | 556 | displayname = _(b'the working copy and bookmarks') |
|
558 | 557 | else: |
|
559 | 558 | displayname = b"'%s'" % name |
|
560 | 559 | ui.status(_(b"previous locations of %s:\n") % displayname) |
|
561 | 560 | |
|
562 | 561 | limit = logcmdutil.getlimit(opts) |
|
563 | 562 | entry = None |
|
564 | 563 | ui.pager(b'journal') |
|
565 | 564 | for count, entry in enumerate(repo.journal.filtered(name=name)): |
|
566 | 565 | if count == limit: |
|
567 | 566 | break |
|
568 | 567 | |
|
569 | 568 | fm.startitem() |
|
570 | 569 | fm.condwrite( |
|
571 | 570 | ui.verbose, b'oldnodes', b'%s -> ', formatnodes(entry.oldhashes) |
|
572 | 571 | ) |
|
573 | 572 | fm.write(b'newnodes', b'%s', formatnodes(entry.newhashes)) |
|
574 | 573 | fm.condwrite(ui.verbose, b'user', b' %-8s', entry.user) |
|
575 | 574 | fm.condwrite( |
|
576 | 575 | opts.get(b'all') or name.startswith(b're:'), |
|
577 | 576 | b'name', |
|
578 | 577 | b' %-8s', |
|
579 | 578 | entry.name, |
|
580 | 579 | ) |
|
581 | 580 | |
|
582 | 581 | fm.condwrite( |
|
583 | 582 | ui.verbose, |
|
584 | 583 | b'date', |
|
585 | 584 | b' %s', |
|
586 | 585 | fm.formatdate(entry.timestamp, b'%Y-%m-%d %H:%M %1%2'), |
|
587 | 586 | ) |
|
588 | 587 | fm.write(b'command', b' %s\n', entry.command) |
|
589 | 588 | |
|
590 | 589 | if opts.get(b"commits"): |
|
591 | 590 | if fm.isplain(): |
|
592 | 591 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
593 | 592 | else: |
|
594 | 593 | displayer = logcmdutil.changesetformatter( |
|
595 | 594 | ui, repo, fm.nested(b'changesets'), diffopts=opts |
|
596 | 595 | ) |
|
597 | 596 | for hash in entry.newhashes: |
|
598 | 597 | try: |
|
599 | 598 | ctx = repo[hash] |
|
600 | 599 | displayer.show(ctx) |
|
601 | 600 | except error.RepoLookupError as e: |
|
602 | 601 | fm.plain(b"%s\n\n" % pycompat.bytestr(e)) |
|
603 | 602 | displayer.close() |
|
604 | 603 | |
|
605 | 604 | fm.end() |
|
606 | 605 | |
|
607 | 606 | if entry is None: |
|
608 | 607 | ui.status(_(b"no recorded locations\n")) |
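
Typical invocations, matching the flags documented in the docstring above
(mybookmark is an example name)::

    $ hg journal                        # previous positions of the working copy
    $ hg journal --verbose mybookmark   # add the prior hash, user and time
    $ hg journal --all -T json          # machine-readable output for all names
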
@@ -1,182 +1,183 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''base class for store implementations and store-related utility code''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | |
|
14 |
from mercurial import |
|
|
14 | from mercurial.node import short | |
|
15 | from mercurial import util | |
|
15 | 16 | from mercurial.utils import ( |
|
16 | 17 | urlutil, |
|
17 | 18 | ) |
|
18 | 19 | |
|
19 | 20 | from . import lfutil |
|
20 | 21 | |
|
21 | 22 | |
|
22 | 23 | class StoreError(Exception): |
|
23 | 24 | """Raised when there is a problem getting files from or putting |
|
24 | 25 | files to a central store.""" |
|
25 | 26 | |
|
26 | 27 | def __init__(self, filename, hash, url, detail): |
|
27 | 28 | self.filename = filename |
|
28 | 29 | self.hash = hash |
|
29 | 30 | self.url = url |
|
30 | 31 | self.detail = detail |
|
31 | 32 | |
|
32 | 33 | def longmessage(self): |
|
33 | 34 | return _(b"error getting id %s from url %s for file %s: %s\n") % ( |
|
34 | 35 | self.hash, |
|
35 | 36 | urlutil.hidepassword(self.url), |
|
36 | 37 | self.filename, |
|
37 | 38 | self.detail, |
|
38 | 39 | ) |
|
39 | 40 | |
|
40 | 41 | def __str__(self): |
|
41 | 42 | return b"%s: %s" % (urlutil.hidepassword(self.url), self.detail) |
|
42 | 43 | |
|
43 | 44 | |
|
44 | 45 | class basestore(object): |
|
45 | 46 | def __init__(self, ui, repo, url): |
|
46 | 47 | self.ui = ui |
|
47 | 48 | self.repo = repo |
|
48 | 49 | self.url = url |
|
49 | 50 | |
|
50 | 51 | def put(self, source, hash): |
|
51 | 52 | '''Put source file into the store so it can be retrieved by hash.''' |
|
52 | 53 | raise NotImplementedError(b'abstract method') |
|
53 | 54 | |
|
54 | 55 | def exists(self, hashes): |
|
55 | 56 | """Check to see if the store contains the given hashes. Given an |
|
56 | 57 | iterable of hashes it returns a mapping from hash to bool.""" |
|
57 | 58 | raise NotImplementedError(b'abstract method') |
|
58 | 59 | |
|
59 | 60 | def get(self, files): |
|
60 | 61 | """Get the specified largefiles from the store and write to local |
|
61 | 62 | files under repo.root. files is a list of (filename, hash) |
|
62 | 63 | tuples. Return (success, missing), lists of files successfully |
|
63 | 64 | downloaded and those not found in the store. success is a list |
|
64 | 65 | of (filename, hash) tuples; missing is a list of filenames that |
|
65 | 66 | we could not get. (The detailed error message will already have |
|
66 | 67 | been presented to the user, so missing is just supplied as a |
|
67 | 68 | summary.)""" |
|
68 | 69 | success = [] |
|
69 | 70 | missing = [] |
|
70 | 71 | ui = self.ui |
|
71 | 72 | |
|
72 | 73 | at = 0 |
|
73 | 74 | available = self.exists({hash for (_filename, hash) in files}) |
|
74 | 75 | with ui.makeprogress( |
|
75 | 76 | _(b'getting largefiles'), unit=_(b'files'), total=len(files) |
|
76 | 77 | ) as progress: |
|
77 | 78 | for filename, hash in files: |
|
78 | 79 | progress.update(at) |
|
79 | 80 | at += 1 |
|
80 | 81 | ui.note(_(b'getting %s:%s\n') % (filename, hash)) |
|
81 | 82 | |
|
82 | 83 | if not available.get(hash): |
|
83 | 84 | ui.warn( |
|
84 | 85 | _(b'%s: largefile %s not available from %s\n') |
|
85 | 86 | % (filename, hash, urlutil.hidepassword(self.url)) |
|
86 | 87 | ) |
|
87 | 88 | missing.append(filename) |
|
88 | 89 | continue |
|
89 | 90 | |
|
90 | 91 | if self._gethash(filename, hash): |
|
91 | 92 | success.append((filename, hash)) |
|
92 | 93 | else: |
|
93 | 94 | missing.append(filename) |
|
94 | 95 | |
|
95 | 96 | return (success, missing) |
|
96 | 97 | |
|
97 | 98 | def _gethash(self, filename, hash): |
|
98 | 99 | """Get file with the provided hash and store it in the local repo's |
|
99 | 100 | store and in the usercache. |
|
100 | 101 | filename is for informational messages only. |
|
101 | 102 | """ |
|
102 | 103 | util.makedirs(lfutil.storepath(self.repo, b'')) |
|
103 | 104 | storefilename = lfutil.storepath(self.repo, hash) |
|
104 | 105 | |
|
105 | 106 | tmpname = storefilename + b'.tmp' |
|
106 | 107 | with util.atomictempfile( |
|
107 | 108 | tmpname, createmode=self.repo.store.createmode |
|
108 | 109 | ) as tmpfile: |
|
109 | 110 | try: |
|
110 | 111 | gothash = self._getfile(tmpfile, filename, hash) |
|
111 | 112 | except StoreError as err: |
|
112 | 113 | self.ui.warn(err.longmessage()) |
|
113 | 114 | gothash = b"" |
|
114 | 115 | |
|
115 | 116 | if gothash != hash: |
|
116 | 117 | if gothash != b"": |
|
117 | 118 | self.ui.warn( |
|
118 | 119 | _(b'%s: data corruption (expected %s, got %s)\n') |
|
119 | 120 | % (filename, hash, gothash) |
|
120 | 121 | ) |
|
121 | 122 | util.unlink(tmpname) |
|
122 | 123 | return False |
|
123 | 124 | |
|
124 | 125 | util.rename(tmpname, storefilename) |
|
125 | 126 | lfutil.linktousercache(self.repo, hash) |
|
126 | 127 | return True |
|
127 | 128 | |
|
128 | 129 | def verify(self, revs, contents=False): |
|
129 | 130 | """Verify the existence (and, optionally, contents) of every big |
|
130 | 131 | file revision referenced by every changeset in revs. |
|
131 | 132 | Return 0 if all is well, non-zero on any errors.""" |
|
132 | 133 | |
|
133 | 134 | self.ui.status( |
|
134 | 135 | _(b'searching %d changesets for largefiles\n') % len(revs) |
|
135 | 136 | ) |
|
136 | 137 | verified = set() # set of (filename, filenode) tuples |
|
137 | 138 | filestocheck = [] # list of (cset, filename, expectedhash) |
|
138 | 139 | for rev in revs: |
|
139 | 140 | cctx = self.repo[rev] |
|
140 |
cset = b"%d:%s" % (cctx.rev(), |
|
|
141 | cset = b"%d:%s" % (cctx.rev(), short(cctx.node())) | |
|
141 | 142 | |
|
142 | 143 | for standin in cctx: |
|
143 | 144 | filename = lfutil.splitstandin(standin) |
|
144 | 145 | if filename: |
|
145 | 146 | fctx = cctx[standin] |
|
146 | 147 | key = (filename, fctx.filenode()) |
|
147 | 148 | if key not in verified: |
|
148 | 149 | verified.add(key) |
|
149 | 150 | expectedhash = lfutil.readasstandin(fctx) |
|
150 | 151 | filestocheck.append((cset, filename, expectedhash)) |
|
151 | 152 | |
|
152 | 153 | failed = self._verifyfiles(contents, filestocheck) |
|
153 | 154 | |
|
154 | 155 | numrevs = len(verified) |
|
155 | 156 | numlfiles = len({fname for (fname, fnode) in verified}) |
|
156 | 157 | if contents: |
|
157 | 158 | self.ui.status( |
|
158 | 159 | _(b'verified contents of %d revisions of %d largefiles\n') |
|
159 | 160 | % (numrevs, numlfiles) |
|
160 | 161 | ) |
|
161 | 162 | else: |
|
162 | 163 | self.ui.status( |
|
163 | 164 | _(b'verified existence of %d revisions of %d largefiles\n') |
|
164 | 165 | % (numrevs, numlfiles) |
|
165 | 166 | ) |
|
166 | 167 | return int(failed) |
|
167 | 168 | |
|
168 | 169 | def _getfile(self, tmpfile, filename, hash): |
|
169 | 170 | """Fetch one revision of one file from the store and write it |
|
170 | 171 | to tmpfile. Compute the hash of the file on-the-fly as it |
|
171 | 172 | downloads and return the hash. Close tmpfile. Raise |
|
172 | 173 | StoreError if unable to download the file (e.g. it does not |
|
173 | 174 | exist in the store).""" |
|
174 | 175 | raise NotImplementedError(b'abstract method') |
|
175 | 176 | |
|
176 | 177 | def _verifyfiles(self, contents, filestocheck): |
|
177 | 178 | """Perform the actual verification of files in the store. |
|
178 | 179 | 'contents' controls verification of content hash. |
|
179 | 180 | 'filestocheck' is list of files to check. |
|
180 | 181 | Returns _true_ if any problems are found! |
|
181 | 182 | """ |
|
182 | 183 | raise NotImplementedError(b'abstract method') |
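
The NotImplementedError methods above, together with put() and exists(),
define the store interface. A skeletal subclass under stated assumptions
(fetchurl() is a made-up download helper; put() and exists() are omitted)
showing what _getfile() and _verifyfiles() are expected to return::

    from mercurial.node import hex
    from mercurial.utils import hashutil

    class samplestore(basestore):
        def _getfile(self, tmpfile, filename, hash):
            data = fetchurl(b"%s/%s" % (self.url, hash))  # hypothetical
            tmpfile.write(data)
            # return the hash actually received; _gethash() compares it
            # with the expected hash to detect corruption
            return hex(hashutil.sha1(data).digest())

        def _verifyfiles(self, contents, filestocheck):
            # true if any referenced largefile is missing from the store
            wanted = {h for (_cset, _f, h) in filestocheck}
            available = self.exists(wanted)
            return not all(available.values())
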
@@ -1,668 +1,667 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''High-level command function for lfconvert, plus the cmdtable.''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import errno |
|
13 | 13 | import os |
|
14 | 14 | import shutil |
|
15 | 15 | |
|
16 | 16 | from mercurial.i18n import _ |
|
17 | 17 | from mercurial.node import ( |
|
18 | 18 | bin, |
|
19 | 19 | hex, |
|
20 | nullid, | |
|
21 | 20 | ) |
|
22 | 21 | |
|
23 | 22 | from mercurial import ( |
|
24 | 23 | cmdutil, |
|
25 | 24 | context, |
|
26 | 25 | error, |
|
27 | 26 | exthelper, |
|
28 | 27 | hg, |
|
29 | 28 | lock, |
|
30 | 29 | match as matchmod, |
|
31 | 30 | pycompat, |
|
32 | 31 | scmutil, |
|
33 | 32 | util, |
|
34 | 33 | ) |
|
35 | 34 | from mercurial.utils import hashutil |
|
36 | 35 | |
|
37 | 36 | from ..convert import ( |
|
38 | 37 | convcmd, |
|
39 | 38 | filemap, |
|
40 | 39 | ) |
|
41 | 40 | |
|
42 | 41 | from . import lfutil, storefactory |
|
43 | 42 | |
|
44 | 43 | release = lock.release |
|
45 | 44 | |
|
46 | 45 | # -- Commands ---------------------------------------------------------- |
|
47 | 46 | |
|
48 | 47 | eh = exthelper.exthelper() |
|
49 | 48 | |
|
50 | 49 | |
|
51 | 50 | @eh.command( |
|
52 | 51 | b'lfconvert', |
|
53 | 52 | [ |
|
54 | 53 | ( |
|
55 | 54 | b's', |
|
56 | 55 | b'size', |
|
57 | 56 | b'', |
|
58 | 57 | _(b'minimum size (MB) for files to be converted as largefiles'), |
|
59 | 58 | b'SIZE', |
|
60 | 59 | ), |
|
61 | 60 | ( |
|
62 | 61 | b'', |
|
63 | 62 | b'to-normal', |
|
64 | 63 | False, |
|
65 | 64 | _(b'convert from a largefiles repo to a normal repo'), |
|
66 | 65 | ), |
|
67 | 66 | ], |
|
68 | 67 | _(b'hg lfconvert SOURCE DEST [FILE ...]'), |
|
69 | 68 | norepo=True, |
|
70 | 69 | inferrepo=True, |
|
71 | 70 | ) |
|
72 | 71 | def lfconvert(ui, src, dest, *pats, **opts): |
|
73 | 72 | """convert a normal repository to a largefiles repository |
|
74 | 73 | |
|
75 | 74 | Convert repository SOURCE to a new repository DEST, identical to |
|
76 | 75 | SOURCE except that certain files will be converted as largefiles: |
|
77 | 76 | specifically, any file that matches any PATTERN *or* whose size is |
|
78 | 77 | above the minimum size threshold is converted as a largefile. The |
|
79 | 78 | size used to determine whether or not to track a file as a |
|
80 | 79 | largefile is the size of the first version of the file. The |
|
81 | 80 | minimum size can be specified either with --size or in |
|
82 | 81 | configuration as ``largefiles.size``. |
|
83 | 82 | |
|
84 | 83 | After running this command you will need to make sure that |
|
85 | 84 | largefiles is enabled anywhere you intend to push the new |
|
86 | 85 | repository. |
|
87 | 86 | |
|
88 | 87 | Use --to-normal to convert largefiles back to normal files; after |
|
89 | 88 | this, the DEST repository can be used without largefiles at all.""" |
|
90 | 89 | |
|
91 | 90 | opts = pycompat.byteskwargs(opts) |
|
92 | 91 | if opts[b'to_normal']: |
|
93 | 92 | tolfile = False |
|
94 | 93 | else: |
|
95 | 94 | tolfile = True |
|
96 | 95 | size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None) |
|
97 | 96 | |
|
98 | 97 | if not hg.islocal(src): |
|
99 | 98 | raise error.Abort(_(b'%s is not a local Mercurial repo') % src) |
|
100 | 99 | if not hg.islocal(dest): |
|
101 | 100 | raise error.Abort(_(b'%s is not a local Mercurial repo') % dest) |
|
102 | 101 | |
|
103 | 102 | rsrc = hg.repository(ui, src) |
|
104 | 103 | ui.status(_(b'initializing destination %s\n') % dest) |
|
105 | 104 | rdst = hg.repository(ui, dest, create=True) |
|
106 | 105 | |
|
107 | 106 | success = False |
|
108 | 107 | dstwlock = dstlock = None |
|
109 | 108 | try: |
|
110 | 109 | # Get a list of all changesets in the source. The easy way to do this |
|
111 | 110 | # is to simply walk the changelog, using changelog.nodesbetween(). |
|
112 | 111 | # Take a look at mercurial/revlog.py:639 for more details. |
|
113 | 112 | # Use a generator instead of a list to decrease memory usage |
|
114 | 113 | ctxs = ( |
|
115 | 114 | rsrc[ctx] |
|
116 | 115 | for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0] |
|
117 | 116 | ) |
|
118 | revmap = {nullid: nullid} | |
|
117 | revmap = {rsrc.nullid: rdst.nullid} | |
|
119 | 118 | if tolfile: |
|
120 | 119 | # Lock destination to prevent modification while it is being converted.
|
121 | 120 | # Don't need to lock src because we are just reading from its |
|
122 | 121 | # history which can't change. |
|
123 | 122 | dstwlock = rdst.wlock() |
|
124 | 123 | dstlock = rdst.lock() |
|
125 | 124 | |
|
126 | 125 | lfiles = set() |
|
127 | 126 | normalfiles = set() |
|
128 | 127 | if not pats: |
|
129 | 128 | pats = ui.configlist(lfutil.longname, b'patterns') |
|
130 | 129 | if pats: |
|
131 | 130 | matcher = matchmod.match(rsrc.root, b'', list(pats)) |
|
132 | 131 | else: |
|
133 | 132 | matcher = None |
|
134 | 133 | |
|
135 | 134 | lfiletohash = {} |
|
136 | 135 | with ui.makeprogress( |
|
137 | 136 | _(b'converting revisions'), |
|
138 | 137 | unit=_(b'revisions'), |
|
139 | 138 | total=rsrc[b'tip'].rev(), |
|
140 | 139 | ) as progress: |
|
141 | 140 | for ctx in ctxs: |
|
142 | 141 | progress.update(ctx.rev()) |
|
143 | 142 | _lfconvert_addchangeset( |
|
144 | 143 | rsrc, |
|
145 | 144 | rdst, |
|
146 | 145 | ctx, |
|
147 | 146 | revmap, |
|
148 | 147 | lfiles, |
|
149 | 148 | normalfiles, |
|
150 | 149 | matcher, |
|
151 | 150 | size, |
|
152 | 151 | lfiletohash, |
|
153 | 152 | ) |
|
154 | 153 | |
|
155 | 154 | if rdst.wvfs.exists(lfutil.shortname): |
|
156 | 155 | rdst.wvfs.rmtree(lfutil.shortname) |
|
157 | 156 | |
|
158 | 157 | for f in lfiletohash.keys(): |
|
159 | 158 | if rdst.wvfs.isfile(f): |
|
160 | 159 | rdst.wvfs.unlink(f) |
|
161 | 160 | try: |
|
162 | 161 | rdst.wvfs.removedirs(rdst.wvfs.dirname(f)) |
|
163 | 162 | except OSError: |
|
164 | 163 | pass |
|
165 | 164 | |
|
166 | 165 | # If there were any files converted to largefiles, add largefiles |
|
167 | 166 | # to the destination repository's requirements. |
|
168 | 167 | if lfiles: |
|
169 | 168 | rdst.requirements.add(b'largefiles') |
|
170 | 169 | scmutil.writereporequirements(rdst) |
|
171 | 170 | else: |
|
172 | 171 | |
|
173 | 172 | class lfsource(filemap.filemap_source): |
|
174 | 173 | def __init__(self, ui, source): |
|
175 | 174 | super(lfsource, self).__init__(ui, source, None) |
|
176 | 175 | self.filemapper.rename[lfutil.shortname] = b'.' |
|
177 | 176 | |
|
178 | 177 | def getfile(self, name, rev): |
|
179 | 178 | realname, realrev = rev |
|
180 | 179 | f = super(lfsource, self).getfile(name, rev) |
|
181 | 180 | |
|
182 | 181 | if ( |
|
183 | 182 | not realname.startswith(lfutil.shortnameslash) |
|
184 | 183 | or f[0] is None |
|
185 | 184 | ): |
|
186 | 185 | return f |
|
187 | 186 | |
|
188 | 187 | # Substitute in the largefile data for the hash |
|
189 | 188 | hash = f[0].strip() |
|
190 | 189 | path = lfutil.findfile(rsrc, hash) |
|
191 | 190 | |
|
192 | 191 | if path is None: |
|
193 | 192 | raise error.Abort( |
|
194 | 193 | _(b"missing largefile for '%s' in %s") |
|
195 | 194 | % (realname, realrev) |
|
196 | 195 | ) |
|
197 | 196 | return util.readfile(path), f[1] |
|
198 | 197 | |
|
199 | 198 | class converter(convcmd.converter): |
|
200 | 199 | def __init__(self, ui, source, dest, revmapfile, opts): |
|
201 | 200 | src = lfsource(ui, source) |
|
202 | 201 | |
|
203 | 202 | super(converter, self).__init__( |
|
204 | 203 | ui, src, dest, revmapfile, opts |
|
205 | 204 | ) |
|
206 | 205 | |
|
207 | 206 | found, missing = downloadlfiles(ui, rsrc) |
|
208 | 207 | if missing != 0: |
|
209 | 208 | raise error.Abort(_(b"all largefiles must be present locally")) |
|
210 | 209 | |
|
211 | 210 | orig = convcmd.converter |
|
212 | 211 | convcmd.converter = converter |
|
213 | 212 | |
|
214 | 213 | try: |
|
215 | 214 | convcmd.convert( |
|
216 | 215 | ui, src, dest, source_type=b'hg', dest_type=b'hg' |
|
217 | 216 | ) |
|
218 | 217 | finally: |
|
219 | 218 | convcmd.converter = orig |
|
220 | 219 | success = True |
|
221 | 220 | finally: |
|
222 | 221 | if tolfile: |
|
223 | 222 | rdst.dirstate.clear() |
|
224 | 223 | release(dstlock, dstwlock) |
|
225 | 224 | if not success: |
|
226 | 225 | # we failed, remove the new directory |
|
227 | 226 | shutil.rmtree(rdst.root) |
|
228 | 227 | |
|
229 | 228 | |
|
230 | 229 | def _lfconvert_addchangeset( |
|
231 | 230 | rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash |
|
232 | 231 | ): |
|
233 | 232 | # Convert src parents to dst parents |
|
234 | 233 | parents = _convertparents(ctx, revmap) |
|
235 | 234 | |
|
236 | 235 | # Generate list of changed files |
|
237 | 236 | files = _getchangedfiles(ctx, parents) |
|
238 | 237 | |
|
239 | 238 | dstfiles = [] |
|
240 | 239 | for f in files: |
|
241 | 240 | if f not in lfiles and f not in normalfiles: |
|
242 | 241 | islfile = _islfile(f, ctx, matcher, size) |
|
243 | 242 | # If this file was renamed or copied then copy |
|
244 | 243 | # the largefile-ness of its predecessor |
|
245 | 244 | if f in ctx.manifest(): |
|
246 | 245 | fctx = ctx.filectx(f) |
|
247 | 246 | renamed = fctx.copysource() |
|
248 | 247 | if renamed is None: |
|
249 | 248 | # the code below assumes renamed to be a boolean or a list |
|
250 | 249 | # and won't quite work with the value None |
|
251 | 250 | renamed = False |
|
252 | 251 | renamedlfile = renamed and renamed in lfiles |
|
253 | 252 | islfile |= renamedlfile |
|
254 | 253 | if b'l' in fctx.flags(): |
|
255 | 254 | if renamedlfile: |
|
256 | 255 | raise error.Abort( |
|
257 | 256 | _(b'renamed/copied largefile %s becomes symlink') |
|
258 | 257 | % f |
|
259 | 258 | ) |
|
260 | 259 | islfile = False |
|
261 | 260 | if islfile: |
|
262 | 261 | lfiles.add(f) |
|
263 | 262 | else: |
|
264 | 263 | normalfiles.add(f) |
|
265 | 264 | |
|
266 | 265 | if f in lfiles: |
|
267 | 266 | fstandin = lfutil.standin(f) |
|
268 | 267 | dstfiles.append(fstandin) |
|
269 | 268 | # largefile in manifest if it has not been removed/renamed |
|
270 | 269 | if f in ctx.manifest(): |
|
271 | 270 | fctx = ctx.filectx(f) |
|
272 | 271 | if b'l' in fctx.flags(): |
|
273 | 272 | renamed = fctx.copysource() |
|
274 | 273 | if renamed and renamed in lfiles: |
|
275 | 274 | raise error.Abort( |
|
276 | 275 | _(b'largefile %s becomes symlink') % f |
|
277 | 276 | ) |
|
278 | 277 | |
|
279 | 278 | # largefile was modified, update standins |
|
280 | 279 | m = hashutil.sha1(b'') |
|
281 | 280 | m.update(ctx[f].data()) |
|
282 | 281 | hash = hex(m.digest()) |
|
283 | 282 | if f not in lfiletohash or lfiletohash[f] != hash: |
|
284 | 283 | rdst.wwrite(f, ctx[f].data(), ctx[f].flags()) |
|
285 | 284 | executable = b'x' in ctx[f].flags() |
|
286 | 285 | lfutil.writestandin(rdst, fstandin, hash, executable) |
|
287 | 286 | lfiletohash[f] = hash |
|
288 | 287 | else: |
|
289 | 288 | # normal file |
|
290 | 289 | dstfiles.append(f) |
|
291 | 290 | |
|
292 | 291 | def getfilectx(repo, memctx, f): |
|
293 | 292 | srcfname = lfutil.splitstandin(f) |
|
294 | 293 | if srcfname is not None: |
|
295 | 294 | # if the file isn't in the manifest then it was removed |
|
296 | 295 | # or renamed, return None to indicate this |
|
297 | 296 | try: |
|
298 | 297 | fctx = ctx.filectx(srcfname) |
|
299 | 298 | except error.LookupError: |
|
300 | 299 | return None |
|
301 | 300 | renamed = fctx.copysource() |
|
302 | 301 | if renamed: |
|
303 | 302 | # standin is always a largefile because largefile-ness |
|
304 | 303 | # doesn't change after rename or copy |
|
305 | 304 | renamed = lfutil.standin(renamed) |
|
306 | 305 | |
|
307 | 306 | return context.memfilectx( |
|
308 | 307 | repo, |
|
309 | 308 | memctx, |
|
310 | 309 | f, |
|
311 | 310 | lfiletohash[srcfname] + b'\n', |
|
312 | 311 | b'l' in fctx.flags(), |
|
313 | 312 | b'x' in fctx.flags(), |
|
314 | 313 | renamed, |
|
315 | 314 | ) |
|
316 | 315 | else: |
|
317 | 316 | return _getnormalcontext(repo, ctx, f, revmap) |
|
318 | 317 | |
|
319 | 318 | # Commit |
|
320 | 319 | _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap) |
|
321 | 320 | |
|
322 | 321 | |
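As an illustration (not part of this change): the standin refresh above reduces to hashing the largefile's contents with SHA-1 and recording the hex digest. A minimal stdlib-only sketch of the value writestandin() ends up storing, with a made-up payload:

    import hashlib

    def standin_contents(data):
        # A standin holds the SHA-1 hex digest of the largefile's
        # contents followed by a newline (cf. writestandin()).
        return hashlib.sha1(data).hexdigest().encode('ascii') + b'\n'

    payload = b'example largefile payload'   # hypothetical content
    print(standin_contents(payload))         # 40 hex characters + b'\n'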
|
323 | 322 | def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap): |
|
324 | 323 | mctx = context.memctx( |
|
325 | 324 | rdst, |
|
326 | 325 | parents, |
|
327 | 326 | ctx.description(), |
|
328 | 327 | dstfiles, |
|
329 | 328 | getfilectx, |
|
330 | 329 | ctx.user(), |
|
331 | 330 | ctx.date(), |
|
332 | 331 | ctx.extra(), |
|
333 | 332 | ) |
|
334 | 333 | ret = rdst.commitctx(mctx) |
|
335 | 334 | lfutil.copyalltostore(rdst, ret) |
|
336 | 335 | rdst.setparents(ret) |
|
337 | 336 | revmap[ctx.node()] = rdst.changelog.tip() |
|
338 | 337 | |
|
339 | 338 | |
|
340 | 339 | # Generate list of changed files |
|
341 | 340 | def _getchangedfiles(ctx, parents): |
|
342 | 341 | files = set(ctx.files()) |
|
343 | if nullid not in parents: | |
|
342 | if ctx.repo().nullid not in parents: | |
|
344 | 343 | mc = ctx.manifest() |
|
345 | 344 | for pctx in ctx.parents(): |
|
346 | 345 | for fn in pctx.manifest().diff(mc): |
|
347 | 346 | files.add(fn) |
|
348 | 347 | return files |
|
349 | 348 | |
|
350 | 349 | |
|
351 | 350 | # Convert src parents to dst parents |
|
352 | 351 | def _convertparents(ctx, revmap): |
|
353 | 352 | parents = [] |
|
354 | 353 | for p in ctx.parents(): |
|
355 | 354 | parents.append(revmap[p.node()]) |
|
356 | 355 | while len(parents) < 2: |
|
357 | parents.append(nullid) | |
|
356 | parents.append(ctx.repo().nullid) | |
|
358 | 357 | return parents |
|
359 | 358 | |
|
360 | 359 | |
|
361 | 360 | # Get memfilectx for a normal file |
|
362 | 361 | def _getnormalcontext(repo, ctx, f, revmap): |
|
363 | 362 | try: |
|
364 | 363 | fctx = ctx.filectx(f) |
|
365 | 364 | except error.LookupError: |
|
366 | 365 | return None |
|
367 | 366 | renamed = fctx.copysource() |
|
368 | 367 | |
|
369 | 368 | data = fctx.data() |
|
370 | 369 | if f == b'.hgtags': |
|
371 | 370 | data = _converttags(repo.ui, revmap, data) |
|
372 | 371 | return context.memfilectx( |
|
373 | 372 | repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed |
|
374 | 373 | ) |
|
375 | 374 | |
|
376 | 375 | |
|
377 | 376 | # Remap tag data using a revision map |
|
378 | 377 | def _converttags(ui, revmap, data): |
|
379 | 378 | newdata = [] |
|
380 | 379 | for line in data.splitlines(): |
|
381 | 380 | try: |
|
382 | 381 | id, name = line.split(b' ', 1) |
|
383 | 382 | except ValueError: |
|
384 | 383 | ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line) |
|
385 | 384 | continue |
|
386 | 385 | try: |
|
387 | 386 | newid = bin(id) |
|
388 | 387 | except TypeError: |
|
389 | 388 | ui.warn(_(b'skipping incorrectly formatted id %s\n') % id) |
|
390 | 389 | continue |
|
391 | 390 | try: |
|
392 | 391 | newdata.append(b'%s %s\n' % (hex(revmap[newid]), name)) |
|
393 | 392 | except KeyError: |
|
394 | 393 | ui.warn(_(b'no mapping for id %s\n') % id) |
|
395 | 394 | continue |
|
396 | 395 | return b''.join(newdata) |
|
397 | 396 | |
|
398 | 397 | |
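For reference, _converttags() above rewrites each .hgtags line by mapping the old binary node through revmap and re-hexlifying it. A rough stdlib-only illustration, with fabricated node values:

    from binascii import hexlify, unhexlify

    def convert_tag_line(line, revmap):
        # .hgtags lines are '<40-hex-node> <tagname>'; remap the node
        # (old binary node -> new binary node) and re-encode it.
        node_hex, name = line.split(b' ', 1)
        return b'%s %s\n' % (hexlify(revmap[unhexlify(node_hex)]), name)

    old = b'\xaa' * 20   # fabricated 20-byte binary node
    new = b'\xbb' * 20   # fabricated replacement node
    print(convert_tag_line(b'%s v1.0' % hexlify(old), {old: new}))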
|
399 | 398 | def _islfile(file, ctx, matcher, size): |
|
400 | 399 | """Return true if file should be considered a largefile, i.e. |
|
401 | 400 | matcher matches it or it is larger than size.""" |
|
402 | 401 | # never store special .hg* files as largefiles |
|
403 | 402 | if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs': |
|
404 | 403 | return False |
|
405 | 404 | if matcher and matcher(file): |
|
406 | 405 | return True |
|
407 | 406 | try: |
|
408 | 407 | return ctx.filectx(file).size() >= size * 1024 * 1024 |
|
409 | 408 | except error.LookupError: |
|
410 | 409 | return False |
|
411 | 410 | |
|
412 | 411 | |
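Note that `size` in _islfile() is in mebibytes, so the default of 10 coming out of getminsize() means 10 * 1024 * 1024 bytes. A standalone sketch of the same decision (names are illustrative only):

    def is_largefile(path, size_bytes, matcher=None, threshold_mb=10):
        # Mirrors _islfile(): special .hg* files never become
        # largefiles; otherwise a pattern match or size decides.
        if path in (b'.hgtags', b'.hgignore', b'.hgsigs'):
            return False
        if matcher is not None and matcher(path):
            return True
        return size_bytes >= threshold_mb * 1024 * 1024

    print(is_largefile(b'assets/video.bin', 12 * 1024 * 1024))  # True
    print(is_largefile(b'.hgtags', 50 * 1024 * 1024))           # False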
|
413 | 412 | def uploadlfiles(ui, rsrc, rdst, files): |
|
414 | 413 | '''upload largefiles to the central store''' |
|
415 | 414 | |
|
416 | 415 | if not files: |
|
417 | 416 | return |
|
418 | 417 | |
|
419 | 418 | store = storefactory.openstore(rsrc, rdst, put=True) |
|
420 | 419 | |
|
421 | 420 | at = 0 |
|
422 | 421 | ui.debug(b"sending statlfile command for %d largefiles\n" % len(files)) |
|
423 | 422 | retval = store.exists(files) |
|
424 | 423 | files = [h for h in files if not retval[h]] |
|
425 | 424 | ui.debug(b"%d largefiles need to be uploaded\n" % len(files)) |
|
426 | 425 | |
|
427 | 426 | with ui.makeprogress( |
|
428 | 427 | _(b'uploading largefiles'), unit=_(b'files'), total=len(files) |
|
429 | 428 | ) as progress: |
|
430 | 429 | for hash in files: |
|
431 | 430 | progress.update(at) |
|
432 | 431 | source = lfutil.findfile(rsrc, hash) |
|
433 | 432 | if not source: |
|
434 | 433 | raise error.Abort( |
|
435 | 434 | _( |
|
436 | 435 | b'largefile %s missing from store' |
|
437 | 436 | b' (needs to be uploaded)' |
|
438 | 437 | ) |
|
439 | 438 | % hash |
|
440 | 439 | ) |
|
441 | 440 | # XXX check for errors here |
|
442 | 441 | store.put(source, hash) |
|
443 | 442 | at += 1 |
|
444 | 443 | |
|
445 | 444 | |
|
446 | 445 | def verifylfiles(ui, repo, all=False, contents=False): |
|
447 | 446 | """Verify that every largefile revision in the current changeset |
|
448 | 447 | exists in the central store. With --contents, also verify that |
|
449 | 448 | the contents of each local largefile file revision are correct (SHA-1 hash |
|
450 | 449 | matches the revision ID). With --all, check every changeset in |
|
451 | 450 | this repository.""" |
|
452 | 451 | if all: |
|
453 | 452 | revs = repo.revs(b'all()') |
|
454 | 453 | else: |
|
455 | 454 | revs = [b'.'] |
|
456 | 455 | |
|
457 | 456 | store = storefactory.openstore(repo) |
|
458 | 457 | return store.verify(revs, contents=contents) |
|
459 | 458 | |
|
460 | 459 | |
|
461 | 460 | def cachelfiles(ui, repo, node, filelist=None): |
|
462 | 461 | """cachelfiles ensures that all largefiles needed by the specified revision |
|
463 | 462 | are present in the repository's largefile cache. |
|
464 | 463 | |
|
465 | 464 | returns a tuple (cached, missing). cached is the list of files downloaded |
|
466 | 465 | by this operation; missing is the list of files that were needed but could |
|
467 | 466 | not be found.""" |
|
468 | 467 | lfiles = lfutil.listlfiles(repo, node) |
|
469 | 468 | if filelist: |
|
470 | 469 | lfiles = set(lfiles) & set(filelist) |
|
471 | 470 | toget = [] |
|
472 | 471 | |
|
473 | 472 | ctx = repo[node] |
|
474 | 473 | for lfile in lfiles: |
|
475 | 474 | try: |
|
476 | 475 | expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)]) |
|
477 | 476 | except IOError as err: |
|
478 | 477 | if err.errno == errno.ENOENT: |
|
479 | 478 | continue # node must be None and standin wasn't found in wctx |
|
480 | 479 | raise |
|
481 | 480 | if not lfutil.findfile(repo, expectedhash): |
|
482 | 481 | toget.append((lfile, expectedhash)) |
|
483 | 482 | |
|
484 | 483 | if toget: |
|
485 | 484 | store = storefactory.openstore(repo) |
|
486 | 485 | ret = store.get(toget) |
|
487 | 486 | return ret |
|
488 | 487 | |
|
489 | 488 | return ([], []) |
|
490 | 489 | |
|
491 | 490 | |
|
492 | 491 | def downloadlfiles(ui, repo): |
|
493 | 492 | tonode = repo.changelog.node |
|
494 | 493 | totalsuccess = 0 |
|
495 | 494 | totalmissing = 0 |
|
496 | 495 | for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname): |
|
497 | 496 | success, missing = cachelfiles(ui, repo, tonode(rev)) |
|
498 | 497 | totalsuccess += len(success) |
|
499 | 498 | totalmissing += len(missing) |
|
500 | 499 | ui.status(_(b"%d additional largefiles cached\n") % totalsuccess) |
|
501 | 500 | if totalmissing > 0: |
|
502 | 501 | ui.status(_(b"%d largefiles failed to download\n") % totalmissing) |
|
503 | 502 | return totalsuccess, totalmissing |
|
504 | 503 | |
|
505 | 504 | |
|
506 | 505 | def updatelfiles( |
|
507 | 506 | ui, repo, filelist=None, printmessage=None, normallookup=False |
|
508 | 507 | ): |
|
509 | 508 | """Update largefiles according to standins in the working directory |
|
510 | 509 | |
|
511 | 510 | If ``printmessage`` is not ``None``, it forces the status message to |

512 | 511 | be printed (when true) or suppressed (when false). |
|
513 | 512 | """ |
|
514 | 513 | statuswriter = lfutil.getstatuswriter(ui, repo, printmessage) |
|
515 | 514 | with repo.wlock(): |
|
516 | 515 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
517 | 516 | lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate) |
|
518 | 517 | |
|
519 | 518 | if filelist is not None: |
|
520 | 519 | filelist = set(filelist) |
|
521 | 520 | lfiles = [f for f in lfiles if f in filelist] |
|
522 | 521 | |
|
523 | 522 | update = {} |
|
524 | 523 | dropped = set() |
|
525 | 524 | updated, removed = 0, 0 |
|
526 | 525 | wvfs = repo.wvfs |
|
527 | 526 | wctx = repo[None] |
|
528 | 527 | for lfile in lfiles: |
|
529 | 528 | lfileorig = os.path.relpath( |
|
530 | 529 | scmutil.backuppath(ui, repo, lfile), start=repo.root |
|
531 | 530 | ) |
|
532 | 531 | standin = lfutil.standin(lfile) |
|
533 | 532 | standinorig = os.path.relpath( |
|
534 | 533 | scmutil.backuppath(ui, repo, standin), start=repo.root |
|
535 | 534 | ) |
|
536 | 535 | if wvfs.exists(standin): |
|
537 | 536 | if wvfs.exists(standinorig) and wvfs.exists(lfile): |
|
538 | 537 | shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig)) |
|
539 | 538 | wvfs.unlinkpath(standinorig) |
|
540 | 539 | expecthash = lfutil.readasstandin(wctx[standin]) |
|
541 | 540 | if expecthash != b'': |
|
542 | 541 | if lfile not in wctx: # not switched to normal file |
|
543 | 542 | if repo.dirstate[standin] != b'?': |
|
544 | 543 | wvfs.unlinkpath(lfile, ignoremissing=True) |
|
545 | 544 | else: |
|
546 | 545 | dropped.add(lfile) |
|
547 | 546 | |
|
548 | 547 | # use normallookup() to allocate an entry in largefiles |
|
549 | 548 | # dirstate to prevent lfilesrepo.status() from reporting |
|
550 | 549 | # missing files as removed. |
|
551 | 550 | lfdirstate.normallookup(lfile) |
|
552 | 551 | update[lfile] = expecthash |
|
553 | 552 | else: |
|
554 | 553 | # Remove lfiles for which the standin is deleted, unless the |
|
555 | 554 | # lfile is added to the repository again. This happens when a |
|
556 | 555 | # largefile is converted back to a normal file: the standin |
|
557 | 556 | # disappears, but a new (normal) file appears as the lfile. |
|
558 | 557 | if ( |
|
559 | 558 | wvfs.exists(lfile) |
|
560 | 559 | and repo.dirstate.normalize(lfile) not in wctx |
|
561 | 560 | ): |
|
562 | 561 | wvfs.unlinkpath(lfile) |
|
563 | 562 | removed += 1 |
|
564 | 563 | |
|
565 | 564 | # largefile processing might be slow and be interrupted - be prepared |
|
566 | 565 | lfdirstate.write() |
|
567 | 566 | |
|
568 | 567 | if lfiles: |
|
569 | 568 | lfiles = [f for f in lfiles if f not in dropped] |
|
570 | 569 | |
|
571 | 570 | for f in dropped: |
|
572 | 571 | repo.wvfs.unlinkpath(lfutil.standin(f)) |
|
573 | 572 | |
|
574 | 573 | # This needs to happen for dropped files, otherwise they stay in |
|
575 | 574 | # the M state. |
|
576 | 575 | lfutil.synclfdirstate(repo, lfdirstate, f, normallookup) |
|
577 | 576 | |
|
578 | 577 | statuswriter(_(b'getting changed largefiles\n')) |
|
579 | 578 | cachelfiles(ui, repo, None, lfiles) |
|
580 | 579 | |
|
581 | 580 | for lfile in lfiles: |
|
582 | 581 | update1 = 0 |
|
583 | 582 | |
|
584 | 583 | expecthash = update.get(lfile) |
|
585 | 584 | if expecthash: |
|
586 | 585 | if not lfutil.copyfromcache(repo, expecthash, lfile): |
|
587 | 586 | # failed ... but already removed and set to normallookup |
|
588 | 587 | continue |
|
589 | 588 | # Synchronize largefile dirstate to the last modified |
|
590 | 589 | # time of the file |
|
591 | 590 | lfdirstate.normal(lfile) |
|
592 | 591 | update1 = 1 |
|
593 | 592 | |
|
594 | 593 | # copy the exec mode of largefile standin from the repository's |
|
595 | 594 | # dirstate to its state in the lfdirstate. |
|
596 | 595 | standin = lfutil.standin(lfile) |
|
597 | 596 | if wvfs.exists(standin): |
|
598 | 597 | # exec is decided by the user's permissions, using mask 0o100 |
|
599 | 598 | standinexec = wvfs.stat(standin).st_mode & 0o100 |
|
600 | 599 | st = wvfs.stat(lfile) |
|
601 | 600 | mode = st.st_mode |
|
602 | 601 | if standinexec != mode & 0o100: |
|
603 | 602 | # first remove all X bits, then shift all R bits to X |
|
604 | 603 | mode &= ~0o111 |
|
605 | 604 | if standinexec: |
|
606 | 605 | mode |= (mode >> 2) & 0o111 & ~util.umask |
|
607 | 606 | wvfs.chmod(lfile, mode) |
|
608 | 607 | update1 = 1 |
|
609 | 608 | |
|
610 | 609 | updated += update1 |
|
611 | 610 | |
|
612 | 611 | lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup) |
|
613 | 612 | |
|
614 | 613 | lfdirstate.write() |
|
615 | 614 | if lfiles: |
|
616 | 615 | statuswriter( |
|
617 | 616 | _(b'%d largefiles updated, %d removed\n') % (updated, removed) |
|
618 | 617 | ) |
|
619 | 618 | |
|
620 | 619 | |
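The exec-bit handling near the end of updatelfiles() above ("first remove all X bits, then shift all R bits to X") is easiest to verify with concrete modes. A small worked sketch, assuming a typical 0o022 umask:

    def sync_exec_bit(mode, standinexec, umask=0o022):
        # Mirrors updatelfiles(): clear every execute bit, then, if
        # the standin is executable, copy the read bits down into
        # the execute positions (minus the umask).
        mode &= ~0o111
        if standinexec:
            mode |= (mode >> 2) & 0o111 & ~umask
        return mode

    print(oct(sync_exec_bit(0o644, True)))   # 0o755
    print(oct(sync_exec_bit(0o755, False)))  # 0o644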
|
621 | 620 | @eh.command( |
|
622 | 621 | b'lfpull', |
|
623 | 622 | [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))] |
|
624 | 623 | + cmdutil.remoteopts, |
|
625 | 624 | _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'), |
|
626 | 625 | ) |
|
627 | 626 | def lfpull(ui, repo, source=b"default", **opts): |
|
628 | 627 | """pull largefiles for the specified revisions from the specified source |
|
629 | 628 | |
|
630 | 629 | Pull largefiles that are referenced from local changesets but missing |
|
631 | 630 | locally, pulling from a remote repository to the local cache. |
|
632 | 631 | |
|
633 | 632 | If SOURCE is omitted, the 'default' path will be used. |
|
634 | 633 | See :hg:`help urls` for more information. |
|
635 | 634 | |
|
636 | 635 | .. container:: verbose |
|
637 | 636 | |
|
638 | 637 | Some examples: |
|
639 | 638 | |
|
640 | 639 | - pull largefiles for all branch heads:: |
|
641 | 640 | |
|
642 | 641 | hg lfpull -r "head() and not closed()" |
|
643 | 642 | |
|
644 | 643 | - pull largefiles on the default branch:: |
|
645 | 644 | |
|
646 | 645 | hg lfpull -r "branch(default)" |
|
647 | 646 | """ |
|
648 | 647 | repo.lfpullsource = source |
|
649 | 648 | |
|
650 | 649 | revs = opts.get('rev', []) |
|
651 | 650 | if not revs: |
|
652 | 651 | raise error.Abort(_(b'no revisions specified')) |
|
653 | 652 | revs = scmutil.revrange(repo, revs) |
|
654 | 653 | |
|
655 | 654 | numcached = 0 |
|
656 | 655 | for rev in revs: |
|
657 | 656 | ui.note(_(b'pulling largefiles for revision %d\n') % rev) |
|
658 | 657 | (cached, missing) = cachelfiles(ui, repo, rev) |
|
659 | 658 | numcached += len(cached) |
|
660 | 659 | ui.status(_(b"%d largefiles cached\n") % numcached) |
|
661 | 660 | |
|
662 | 661 | |
|
663 | 662 | @eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE')) |
|
664 | 663 | def debuglfput(ui, repo, filepath, **kwargs): |
|
665 | 664 | hash = lfutil.hashfile(filepath) |
|
666 | 665 | storefactory.openstore(repo).put(filepath, hash) |
|
667 | 666 | ui.write(b'%s\n' % hash) |
|
668 | 667 | return 0 |
@@ -1,784 +1,781 @@
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''largefiles utility code: must not import other modules in this package.''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import contextlib |
|
13 | 13 | import copy |
|
14 | 14 | import os |
|
15 | 15 | import stat |
|
16 | 16 | |
|
17 | 17 | from mercurial.i18n import _ |
|
18 | from mercurial.node import ( |

19 | hex, | |
|
20 | nullid, | |
|
21 | ) | |
|
18 | from mercurial.node import hex | |
|
22 | 19 | from mercurial.pycompat import open |
|
23 | 20 | |
|
24 | 21 | from mercurial import ( |
|
25 | 22 | dirstate, |
|
26 | 23 | encoding, |
|
27 | 24 | error, |
|
28 | 25 | httpconnection, |
|
29 | 26 | match as matchmod, |
|
30 | 27 | pycompat, |
|
31 | 28 | scmutil, |
|
32 | 29 | sparse, |
|
33 | 30 | util, |
|
34 | 31 | vfs as vfsmod, |
|
35 | 32 | ) |
|
36 | 33 | from mercurial.utils import hashutil |
|
37 | 34 | |
|
38 | 35 | shortname = b'.hglf' |
|
39 | 36 | shortnameslash = shortname + b'/' |
|
40 | 37 | longname = b'largefiles' |
|
41 | 38 | |
|
42 | 39 | # -- Private worker functions ------------------------------------------ |
|
43 | 40 | |
|
44 | 41 | |
|
45 | 42 | @contextlib.contextmanager |
|
46 | 43 | def lfstatus(repo, value=True): |
|
47 | 44 | oldvalue = getattr(repo, 'lfstatus', False) |
|
48 | 45 | repo.lfstatus = value |
|
49 | 46 | try: |
|
50 | 47 | yield |
|
51 | 48 | finally: |
|
52 | 49 | repo.lfstatus = oldvalue |
|
53 | 50 | |
|
54 | 51 | |
|
55 | 52 | def getminsize(ui, assumelfiles, opt, default=10): |
|
56 | 53 | lfsize = opt |
|
57 | 54 | if not lfsize and assumelfiles: |
|
58 | 55 | lfsize = ui.config(longname, b'minsize', default=default) |
|
59 | 56 | if lfsize: |
|
60 | 57 | try: |
|
61 | 58 | lfsize = float(lfsize) |
|
62 | 59 | except ValueError: |
|
63 | 60 | raise error.Abort( |
|
64 | 61 | _(b'largefiles: size must be number (not %s)\n') % lfsize |
|
65 | 62 | ) |
|
66 | 63 | if lfsize is None: |
|
67 | 64 | raise error.Abort(_(b'minimum size for largefiles must be specified')) |
|
68 | 65 | return lfsize |
|
69 | 66 | |
|
70 | 67 | |
|
71 | 68 | def link(src, dest): |
|
72 | 69 | """Try to create hardlink - if that fails, efficiently make a copy.""" |
|
73 | 70 | util.makedirs(os.path.dirname(dest)) |
|
74 | 71 | try: |
|
75 | 72 | util.oslink(src, dest) |
|
76 | 73 | except OSError: |
|
77 | 74 | # if hardlinks fail, fallback on atomic copy |
|
78 | 75 | with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf: |
|
79 | 76 | for chunk in util.filechunkiter(srcf): |
|
80 | 77 | dstf.write(chunk) |
|
81 | 78 | os.chmod(dest, os.stat(src).st_mode) |
|
82 | 79 | |
|
83 | 80 | |
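link() above prefers a hardlink and silently degrades to a mode-preserving copy; a rough stdlib-only equivalent (the helper name is invented):

    import os
    import shutil

    def link_or_copy(src, dest):
        # Try a hardlink first; if the filesystem refuses, fall back
        # to copying the bytes and preserving the source's mode.
        os.makedirs(os.path.dirname(dest) or '.', exist_ok=True)
        try:
            os.link(src, dest)
        except OSError:
            shutil.copyfile(src, dest)
            os.chmod(dest, os.stat(src).st_mode)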
|
84 | 81 | def usercachepath(ui, hash): |
|
85 | 82 | """Return the correct location in the "global" largefiles cache for a file |
|
86 | 83 | with the given hash. |
|
87 | 84 | This cache is used for sharing of largefiles across repositories - both |
|
88 | 85 | to preserve download bandwidth and storage space.""" |
|
89 | 86 | return os.path.join(_usercachedir(ui), hash) |
|
90 | 87 | |
|
91 | 88 | |
|
92 | 89 | def _usercachedir(ui, name=longname): |
|
93 | 90 | '''Return the location of the "global" largefiles cache.''' |
|
94 | 91 | path = ui.configpath(name, b'usercache') |
|
95 | 92 | if path: |
|
96 | 93 | return path |
|
97 | 94 | |
|
98 | 95 | hint = None |
|
99 | 96 | |
|
100 | 97 | if pycompat.iswindows: |
|
101 | 98 | appdata = encoding.environ.get( |
|
102 | 99 | b'LOCALAPPDATA', encoding.environ.get(b'APPDATA') |
|
103 | 100 | ) |
|
104 | 101 | if appdata: |
|
105 | 102 | return os.path.join(appdata, name) |
|
106 | 103 | |
|
107 | 104 | hint = _(b"define %s or %s in the environment, or set %s.usercache") % ( |
|
108 | 105 | b"LOCALAPPDATA", |
|
109 | 106 | b"APPDATA", |
|
110 | 107 | name, |
|
111 | 108 | ) |
|
112 | 109 | elif pycompat.isdarwin: |
|
113 | 110 | home = encoding.environ.get(b'HOME') |
|
114 | 111 | if home: |
|
115 | 112 | return os.path.join(home, b'Library', b'Caches', name) |
|
116 | 113 | |
|
117 | 114 | hint = _(b"define %s in the environment, or set %s.usercache") % ( |
|
118 | 115 | b"HOME", |
|
119 | 116 | name, |
|
120 | 117 | ) |
|
121 | 118 | elif pycompat.isposix: |
|
122 | 119 | path = encoding.environ.get(b'XDG_CACHE_HOME') |
|
123 | 120 | if path: |
|
124 | 121 | return os.path.join(path, name) |
|
125 | 122 | home = encoding.environ.get(b'HOME') |
|
126 | 123 | if home: |
|
127 | 124 | return os.path.join(home, b'.cache', name) |
|
128 | 125 | |
|
129 | 126 | hint = _(b"define %s or %s in the environment, or set %s.usercache") % ( |
|
130 | 127 | b"XDG_CACHE_HOME", |
|
131 | 128 | b"HOME", |
|
132 | 129 | name, |
|
133 | 130 | ) |
|
134 | 131 | else: |
|
135 | 132 | raise error.Abort( |
|
136 | 133 | _(b'unknown operating system: %s\n') % pycompat.osname |
|
137 | 134 | ) |
|
138 | 135 | |
|
139 | 136 | raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint) |
|
140 | 137 | |
|
141 | 138 | |
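A trimmed-down sketch of just the POSIX branch of _usercachedir() above (XDG_CACHE_HOME first, then ~/.cache); the function name is illustrative, not the extension's API:

    import os

    def posix_usercachedir(name='largefiles'):
        # Prefer XDG_CACHE_HOME, then fall back to ~/.cache,
        # mirroring the POSIX branch of _usercachedir().
        xdg = os.environ.get('XDG_CACHE_HOME')
        if xdg:
            return os.path.join(xdg, name)
        home = os.environ.get('HOME')
        if home:
            return os.path.join(home, '.cache', name)
        raise RuntimeError('cannot determine usercache location')

    print(posix_usercachedir())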
|
142 | 139 | def inusercache(ui, hash): |
|
143 | 140 | path = usercachepath(ui, hash) |
|
144 | 141 | return os.path.exists(path) |
|
145 | 142 | |
|
146 | 143 | |
|
147 | 144 | def findfile(repo, hash): |
|
148 | 145 | """Return store path of the largefile with the specified hash. |
|
149 | 146 | As a side effect, the file might be linked from user cache. |
|
150 | 147 | Return None if the file can't be found locally.""" |
|
151 | 148 | path, exists = findstorepath(repo, hash) |
|
152 | 149 | if exists: |
|
153 | 150 | repo.ui.note(_(b'found %s in store\n') % hash) |
|
154 | 151 | return path |
|
155 | 152 | elif inusercache(repo.ui, hash): |
|
156 | 153 | repo.ui.note(_(b'found %s in system cache\n') % hash) |
|
157 | 154 | path = storepath(repo, hash) |
|
158 | 155 | link(usercachepath(repo.ui, hash), path) |
|
159 | 156 | return path |
|
160 | 157 | return None |
|
161 | 158 | |
|
162 | 159 | |
|
163 | 160 | class largefilesdirstate(dirstate.dirstate): |
|
164 | 161 | def __getitem__(self, key): |
|
165 | 162 | return super(largefilesdirstate, self).__getitem__(unixpath(key)) |
|
166 | 163 | |
|
167 | 164 | def normal(self, f): |
|
168 | 165 | return super(largefilesdirstate, self).normal(unixpath(f)) |
|
169 | 166 | |
|
170 | 167 | def remove(self, f): |
|
171 | 168 | return super(largefilesdirstate, self).remove(unixpath(f)) |
|
172 | 169 | |
|
173 | 170 | def add(self, f): |
|
174 | 171 | return super(largefilesdirstate, self).add(unixpath(f)) |
|
175 | 172 | |
|
176 | 173 | def drop(self, f): |
|
177 | 174 | return super(largefilesdirstate, self).drop(unixpath(f)) |
|
178 | 175 | |
|
179 | 176 | def forget(self, f): |
|
180 | 177 | return super(largefilesdirstate, self).forget(unixpath(f)) |
|
181 | 178 | |
|
182 | 179 | def normallookup(self, f): |
|
183 | 180 | return super(largefilesdirstate, self).normallookup(unixpath(f)) |
|
184 | 181 | |
|
185 | 182 | def _ignore(self, f): |
|
186 | 183 | return False |
|
187 | 184 | |
|
188 | 185 | def write(self, tr=False): |
|
189 | 186 | # (1) disable PENDING mode always |
|
190 | 187 | # (lfdirstate isn't yet managed as a part of the transaction) |
|
191 | 188 | # (2) avoid develwarn 'use dirstate.write with ....' |
|
192 | 189 | super(largefilesdirstate, self).write(None) |
|
193 | 190 | |
|
194 | 191 | |
|
195 | 192 | def openlfdirstate(ui, repo, create=True): |
|
196 | 193 | """ |
|
197 | 194 | Return a dirstate object that tracks largefiles: i.e. its root is |
|
198 | 195 | the repo root, but it is saved in .hg/largefiles/dirstate. |
|
199 | 196 | """ |
|
200 | 197 | vfs = repo.vfs |
|
201 | 198 | lfstoredir = longname |
|
202 | 199 | opener = vfsmod.vfs(vfs.join(lfstoredir)) |
|
203 | 200 | lfdirstate = largefilesdirstate( |
|
204 | 201 | opener, |
|
205 | 202 | ui, |
|
206 | 203 | repo.root, |
|
207 | 204 | repo.dirstate._validate, |
|
208 | 205 | lambda: sparse.matcher(repo), |
|
209 | 206 | repo.nodeconstants, |
|
210 | 207 | ) |
|
211 | 208 | |
|
212 | 209 | # If the largefiles dirstate does not exist, populate and create |
|
213 | 210 | # it. This ensures that we create it on the first meaningful |
|
214 | 211 | # largefiles operation in a new clone. |
|
215 | 212 | if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')): |
|
216 | 213 | matcher = getstandinmatcher(repo) |
|
217 | 214 | standins = repo.dirstate.walk( |
|
218 | 215 | matcher, subrepos=[], unknown=False, ignored=False |
|
219 | 216 | ) |
|
220 | 217 | |
|
221 | 218 | if len(standins) > 0: |
|
222 | 219 | vfs.makedirs(lfstoredir) |
|
223 | 220 | |
|
224 | 221 | for standin in standins: |
|
225 | 222 | lfile = splitstandin(standin) |
|
226 | 223 | lfdirstate.normallookup(lfile) |
|
227 | 224 | return lfdirstate |
|
228 | 225 | |
|
229 | 226 | |
|
230 | 227 | def lfdirstatestatus(lfdirstate, repo): |
|
231 | 228 | pctx = repo[b'.'] |
|
232 | 229 | match = matchmod.always() |
|
233 | 230 | unsure, s = lfdirstate.status( |
|
234 | 231 | match, subrepos=[], ignored=False, clean=False, unknown=False |
|
235 | 232 | ) |
|
236 | 233 | modified, clean = s.modified, s.clean |
|
237 | 234 | for lfile in unsure: |
|
238 | 235 | try: |
|
239 | 236 | fctx = pctx[standin(lfile)] |
|
240 | 237 | except LookupError: |
|
241 | 238 | fctx = None |
|
242 | 239 | if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)): |
|
243 | 240 | modified.append(lfile) |
|
244 | 241 | else: |
|
245 | 242 | clean.append(lfile) |
|
246 | 243 | lfdirstate.normal(lfile) |
|
247 | 244 | return s |
|
248 | 245 | |
|
249 | 246 | |
|
250 | 247 | def listlfiles(repo, rev=None, matcher=None): |
|
251 | 248 | """return a list of largefiles in the working copy or the |
|
252 | 249 | specified changeset""" |
|
253 | 250 | |
|
254 | 251 | if matcher is None: |
|
255 | 252 | matcher = getstandinmatcher(repo) |
|
256 | 253 | |
|
257 | 254 | # ignore unknown files in working directory |
|
258 | 255 | return [ |
|
259 | 256 | splitstandin(f) |
|
260 | 257 | for f in repo[rev].walk(matcher) |
|
261 | 258 | if rev is not None or repo.dirstate[f] != b'?' |
|
262 | 259 | ] |
|
263 | 260 | |
|
264 | 261 | |
|
265 | 262 | def instore(repo, hash, forcelocal=False): |
|
266 | 263 | '''Return true if a largefile with the given hash exists in the store''' |
|
267 | 264 | return os.path.exists(storepath(repo, hash, forcelocal)) |
|
268 | 265 | |
|
269 | 266 | |
|
270 | 267 | def storepath(repo, hash, forcelocal=False): |
|
271 | 268 | """Return the correct location in the repository largefiles store for a |
|
272 | 269 | file with the given hash.""" |
|
273 | 270 | if not forcelocal and repo.shared(): |
|
274 | 271 | return repo.vfs.reljoin(repo.sharedpath, longname, hash) |
|
275 | 272 | return repo.vfs.join(longname, hash) |
|
276 | 273 | |
|
277 | 274 | |
|
278 | 275 | def findstorepath(repo, hash): |
|
279 | 276 | """Search through the local store path(s) to find the file for the given |
|
280 | 277 | hash. If the file is not found, its path in the primary store is returned. |
|
281 | 278 | The return value is a tuple of (path, exists(path)). |
|
282 | 279 | """ |
|
283 | 280 | # For shared repos, the primary store is in the share source. But for |
|
284 | 281 | # backward compatibility, force a lookup in the local store if it wasn't |
|
285 | 282 | # found in the share source. |
|
286 | 283 | path = storepath(repo, hash, False) |
|
287 | 284 | |
|
288 | 285 | if instore(repo, hash): |
|
289 | 286 | return (path, True) |
|
290 | 287 | elif repo.shared() and instore(repo, hash, True): |
|
291 | 288 | return storepath(repo, hash, True), True |
|
292 | 289 | |
|
293 | 290 | return (path, False) |
|
294 | 291 | |
|
295 | 292 | |
|
296 | 293 | def copyfromcache(repo, hash, filename): |
|
297 | 294 | """Copy the specified largefile from the repo or system cache to |
|
298 | 295 | filename in the repository. Return true on success or false if the |
|
299 | 296 | file was not found in either cache (which should not happen: |
|
300 | 297 | this is meant to be called only after ensuring that the needed |
|
301 | 298 | largefile exists in the cache).""" |
|
302 | 299 | wvfs = repo.wvfs |
|
303 | 300 | path = findfile(repo, hash) |
|
304 | 301 | if path is None: |
|
305 | 302 | return False |
|
306 | 303 | wvfs.makedirs(wvfs.dirname(wvfs.join(filename))) |
|
307 | 304 | # The write may fail before the file is fully written, but we |
|
308 | 305 | # don't use atomic writes in the working copy. |
|
309 | 306 | with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd: |
|
310 | 307 | gothash = copyandhash(util.filechunkiter(srcfd), destfd) |
|
311 | 308 | if gothash != hash: |
|
312 | 309 | repo.ui.warn( |
|
313 | 310 | _(b'%s: data corruption in %s with hash %s\n') |
|
314 | 311 | % (filename, path, gothash) |
|
315 | 312 | ) |
|
316 | 313 | wvfs.unlink(filename) |
|
317 | 314 | return False |
|
318 | 315 | return True |
|
319 | 316 | |
|
320 | 317 | |
|
321 | 318 | def copytostore(repo, ctx, file, fstandin): |
|
322 | 319 | wvfs = repo.wvfs |
|
323 | 320 | hash = readasstandin(ctx[fstandin]) |
|
324 | 321 | if instore(repo, hash): |
|
325 | 322 | return |
|
326 | 323 | if wvfs.exists(file): |
|
327 | 324 | copytostoreabsolute(repo, wvfs.join(file), hash) |
|
328 | 325 | else: |
|
329 | 326 | repo.ui.warn( |
|
330 | 327 | _(b"%s: largefile %s not available from local store\n") |
|
331 | 328 | % (file, hash) |
|
332 | 329 | ) |
|
333 | 330 | |
|
334 | 331 | |
|
335 | 332 | def copyalltostore(repo, node): |
|
336 | 333 | '''Copy all largefiles in a given revision to the store''' |
|
337 | 334 | |
|
338 | 335 | ctx = repo[node] |
|
339 | 336 | for filename in ctx.files(): |
|
340 | 337 | realfile = splitstandin(filename) |
|
341 | 338 | if realfile is not None and filename in ctx.manifest(): |
|
342 | 339 | copytostore(repo, ctx, realfile, filename) |
|
343 | 340 | |
|
344 | 341 | |
|
345 | 342 | def copytostoreabsolute(repo, file, hash): |
|
346 | 343 | if inusercache(repo.ui, hash): |
|
347 | 344 | link(usercachepath(repo.ui, hash), storepath(repo, hash)) |
|
348 | 345 | else: |
|
349 | 346 | util.makedirs(os.path.dirname(storepath(repo, hash))) |
|
350 | 347 | with open(file, b'rb') as srcf: |
|
351 | 348 | with util.atomictempfile( |
|
352 | 349 | storepath(repo, hash), createmode=repo.store.createmode |
|
353 | 350 | ) as dstf: |
|
354 | 351 | for chunk in util.filechunkiter(srcf): |
|
355 | 352 | dstf.write(chunk) |
|
356 | 353 | linktousercache(repo, hash) |
|
357 | 354 | |
|
358 | 355 | |
|
359 | 356 | def linktousercache(repo, hash): |
|
360 | 357 | """Link / copy the largefile with the specified hash from the store |
|
361 | 358 | to the cache.""" |
|
362 | 359 | path = usercachepath(repo.ui, hash) |
|
363 | 360 | link(storepath(repo, hash), path) |
|
364 | 361 | |
|
365 | 362 | |
|
366 | 363 | def getstandinmatcher(repo, rmatcher=None): |
|
367 | 364 | '''Return a match object that applies rmatcher to the standin directory''' |
|
368 | 365 | wvfs = repo.wvfs |
|
369 | 366 | standindir = shortname |
|
370 | 367 | |
|
371 | 368 | # no warnings about missing files or directories |
|
372 | 369 | badfn = lambda f, msg: None |
|
373 | 370 | |
|
374 | 371 | if rmatcher and not rmatcher.always(): |
|
375 | 372 | pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()] |
|
376 | 373 | if not pats: |
|
377 | 374 | pats = [wvfs.join(standindir)] |
|
378 | 375 | match = scmutil.match(repo[None], pats, badfn=badfn) |
|
379 | 376 | else: |
|
380 | 377 | # no patterns: relative to repo root |
|
381 | 378 | match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn) |
|
382 | 379 | return match |
|
383 | 380 | |
|
384 | 381 | |
|
385 | 382 | def composestandinmatcher(repo, rmatcher): |
|
386 | 383 | """Return a matcher that accepts standins corresponding to the |
|
387 | 384 | files accepted by rmatcher. Pass the list of files in the matcher |
|
388 | 385 | as the paths specified by the user.""" |
|
389 | 386 | smatcher = getstandinmatcher(repo, rmatcher) |
|
390 | 387 | isstandin = smatcher.matchfn |
|
391 | 388 | |
|
392 | 389 | def composedmatchfn(f): |
|
393 | 390 | return isstandin(f) and rmatcher.matchfn(splitstandin(f)) |
|
394 | 391 | |
|
395 | 392 | smatcher.matchfn = composedmatchfn |
|
396 | 393 | |
|
397 | 394 | return smatcher |
|
398 | 395 | |
|
399 | 396 | |
|
400 | 397 | def standin(filename): |
|
401 | 398 | """Return the repo-relative path to the standin for the specified big |
|
402 | 399 | file.""" |
|
403 | 400 | # Notes: |
|
404 | 401 | # 1) Some callers want an absolute path, but for instance addlargefiles |
|
405 | 402 | # needs it repo-relative so it can be passed to repo[None].add(). So |
|
406 | 403 | # leave it up to the caller to use repo.wjoin() to get an absolute path. |
|
407 | 404 | # 2) Join with '/' because that's what dirstate always uses, even on |
|
408 | 405 | # Windows. Change existing separator to '/' first in case we are |
|
409 | 406 | # passed filenames from an external source (like the command line). |
|
410 | 407 | return shortnameslash + util.pconvert(filename) |
|
411 | 408 | |
|
412 | 409 | |
|
413 | 410 | def isstandin(filename): |
|
414 | 411 | """Return true if filename is a big file standin. filename must be |
|
415 | 412 | in Mercurial's internal form (slash-separated).""" |
|
416 | 413 | return filename.startswith(shortnameslash) |
|
417 | 414 | |
|
418 | 415 | |
|
419 | 416 | def splitstandin(filename): |
|
420 | 417 | # Split on / because that's what dirstate always uses, even on Windows. |
|
421 | 418 | # Change local separator to / first just in case we are passed filenames |
|
422 | 419 | # from an external source (like the command line). |
|
423 | 420 | bits = util.pconvert(filename).split(b'/', 1) |
|
424 | 421 | if len(bits) == 2 and bits[0] == shortname: |
|
425 | 422 | return bits[1] |
|
426 | 423 | else: |
|
427 | 424 | return None |
|
428 | 425 | |
|
429 | 426 | |
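standin() and splitstandin() above are a pure prefix convention on slash-normalized paths. A self-contained round-trip, re-implemented with plain string operations:

    SHORTNAME = b'.hglf'

    def standin(filename):
        # Join with '/', matching dirstate's internal form.
        return SHORTNAME + b'/' + filename.replace(b'\\', b'/')

    def splitstandin(filename):
        bits = filename.replace(b'\\', b'/').split(b'/', 1)
        if len(bits) == 2 and bits[0] == SHORTNAME:
            return bits[1]
        return None

    assert splitstandin(standin(b'data/big.iso')) == b'data/big.iso'
    assert splitstandin(b'src/normal.py') is None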
|
430 | 427 | def updatestandin(repo, lfile, standin): |
|
431 | 428 | """Re-calculate hash value of lfile and write it into standin |
|
432 | 429 | |
|
433 | 430 | This assumes that "lfutil.standin(lfile) == standin", for efficiency. |
|
434 | 431 | """ |
|
435 | 432 | file = repo.wjoin(lfile) |
|
436 | 433 | if repo.wvfs.exists(lfile): |
|
437 | 434 | hash = hashfile(file) |
|
438 | 435 | executable = getexecutable(file) |
|
439 | 436 | writestandin(repo, standin, hash, executable) |
|
440 | 437 | else: |
|
441 | 438 | raise error.Abort(_(b'%s: file not found!') % lfile) |
|
442 | 439 | |
|
443 | 440 | |
|
444 | 441 | def readasstandin(fctx): |
|
445 | 442 | """read hex hash from given filectx of standin file |
|
446 | 443 | |
|
447 | 444 | This encapsulates how "standin" data is stored into storage layer.""" |
|
448 | 445 | return fctx.data().strip() |
|
449 | 446 | |
|
450 | 447 | |
|
451 | 448 | def writestandin(repo, standin, hash, executable): |
|
452 | 449 | '''write hash to <repo.root>/<standin>''' |
|
453 | 450 | repo.wwrite(standin, hash + b'\n', executable and b'x' or b'') |
|
454 | 451 | |
|
455 | 452 | |
|
456 | 453 | def copyandhash(instream, outfile): |
|
457 | 454 | """Read bytes from instream (iterable) and write them to outfile, |
|
458 | 455 | computing the SHA-1 hash of the data along the way. Return the hash.""" |
|
459 | 456 | hasher = hashutil.sha1(b'') |
|
460 | 457 | for data in instream: |
|
461 | 458 | hasher.update(data) |
|
462 | 459 | outfile.write(data) |
|
463 | 460 | return hex(hasher.digest()) |
|
464 | 461 | |
|
465 | 462 | |
|
466 | 463 | def hashfile(file): |
|
467 | 464 | if not os.path.exists(file): |
|
468 | 465 | return b'' |
|
469 | 466 | with open(file, b'rb') as fd: |
|
470 | 467 | return hexsha1(fd) |
|
471 | 468 | |
|
472 | 469 | |
|
473 | 470 | def getexecutable(filename): |
|
474 | 471 | mode = os.stat(filename).st_mode |
|
475 | 472 | return ( |
|
476 | 473 | (mode & stat.S_IXUSR) |
|
477 | 474 | and (mode & stat.S_IXGRP) |
|
478 | 475 | and (mode & stat.S_IXOTH) |
|
479 | 476 | ) |
|
480 | 477 | |
|
481 | 478 | |
|
482 | 479 | def urljoin(first, second, *arg): |
|
483 | 480 | def join(left, right): |
|
484 | 481 | if not left.endswith(b'/'): |
|
485 | 482 | left += b'/' |
|
486 | 483 | if right.startswith(b'/'): |
|
487 | 484 | right = right[1:] |
|
488 | 485 | return left + right |
|
489 | 486 | |
|
490 | 487 | url = join(first, second) |
|
491 | 488 | for a in arg: |
|
492 | 489 | url = join(url, a) |
|
493 | 490 | return url |
|
494 | 491 | |
|
495 | 492 | |
|
496 | 493 | def hexsha1(fileobj): |
|
497 | 494 | """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like |
|
498 | 495 | object data""" |
|
499 | 496 | h = hashutil.sha1() |
|
500 | 497 | for chunk in util.filechunkiter(fileobj): |
|
501 | 498 | h.update(chunk) |
|
502 | 499 | return hex(h.digest()) |
|
503 | 500 | |
|
504 | 501 | |
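copyandhash() and hexsha1() above both stream chunks through a SHA-1 hasher rather than reading whole files into memory; a roughly equivalent stdlib-only version (the chunk size here is arbitrary; util.filechunkiter chooses its own):

    import hashlib
    import io

    def hexsha1(fileobj, chunksize=131072):
        # Stream the file through SHA-1 so huge largefiles never
        # have to be held in memory at once.
        h = hashlib.sha1()
        for chunk in iter(lambda: fileobj.read(chunksize), b''):
            h.update(chunk)
        return h.hexdigest()

    print(hexsha1(io.BytesIO(b'x' * 1000000)))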
|
505 | 502 | def httpsendfile(ui, filename): |
|
506 | 503 | return httpconnection.httpsendfile(ui, filename, b'rb') |
|
507 | 504 | |
|
508 | 505 | |
|
509 | 506 | def unixpath(path): |
|
510 | 507 | '''Return a version of path normalized for use with the lfdirstate.''' |
|
511 | 508 | return util.pconvert(os.path.normpath(path)) |
|
512 | 509 | |
|
513 | 510 | |
|
514 | 511 | def islfilesrepo(repo): |
|
515 | 512 | '''Return true if the repo is a largefile repo.''' |
|
516 | 513 | if b'largefiles' in repo.requirements and any( |
|
517 | 514 | shortnameslash in f[1] for f in repo.store.datafiles() |
|
518 | 515 | ): |
|
519 | 516 | return True |
|
520 | 517 | |
|
521 | 518 | return any(openlfdirstate(repo.ui, repo, False)) |
|
522 | 519 | |
|
523 | 520 | |
|
524 | 521 | class storeprotonotcapable(Exception): |
|
525 | 522 | def __init__(self, storetypes): |
|
526 | 523 | self.storetypes = storetypes |
|
527 | 524 | |
|
528 | 525 | |
|
529 | 526 | def getstandinsstate(repo): |
|
530 | 527 | standins = [] |
|
531 | 528 | matcher = getstandinmatcher(repo) |
|
532 | 529 | wctx = repo[None] |
|
533 | 530 | for standin in repo.dirstate.walk( |
|
534 | 531 | matcher, subrepos=[], unknown=False, ignored=False |
|
535 | 532 | ): |
|
536 | 533 | lfile = splitstandin(standin) |
|
537 | 534 | try: |
|
538 | 535 | hash = readasstandin(wctx[standin]) |
|
539 | 536 | except IOError: |
|
540 | 537 | hash = None |
|
541 | 538 | standins.append((lfile, hash)) |
|
542 | 539 | return standins |
|
543 | 540 | |
|
544 | 541 | |
|
545 | 542 | def synclfdirstate(repo, lfdirstate, lfile, normallookup): |
|
546 | 543 | lfstandin = standin(lfile) |
|
547 | 544 | if lfstandin in repo.dirstate: |
|
548 | 545 | stat = repo.dirstate._map[lfstandin] |
|
549 | 546 | state, mtime = stat[0], stat[3] |
|
550 | 547 | else: |
|
551 | 548 | state, mtime = b'?', -1 |
|
552 | 549 | if state == b'n': |
|
553 | 550 | if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): |
|
554 | 551 | # state 'n' doesn't ensure 'clean' in this case |
|
555 | 552 | lfdirstate.normallookup(lfile) |
|
556 | 553 | else: |
|
557 | 554 | lfdirstate.normal(lfile) |
|
558 | 555 | elif state == b'm': |
|
559 | 556 | lfdirstate.normallookup(lfile) |
|
560 | 557 | elif state == b'r': |
|
561 | 558 | lfdirstate.remove(lfile) |
|
562 | 559 | elif state == b'a': |
|
563 | 560 | lfdirstate.add(lfile) |
|
564 | 561 | elif state == b'?': |
|
565 | 562 | lfdirstate.drop(lfile) |
|
566 | 563 | |
|
567 | 564 | |
|
568 | 565 | def markcommitted(orig, ctx, node): |
|
569 | 566 | repo = ctx.repo() |
|
570 | 567 | |
|
571 | 568 | orig(node) |
|
572 | 569 | |
|
573 | 570 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" |
|
574 | 571 | # because files coming from the 2nd parent are omitted in the latter. |
|
575 | 572 | # |
|
576 | 573 | # The former should be used to get targets of "synclfdirstate", |
|
577 | 574 | # because such files: |
|
578 | 575 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and |
|
579 | 576 | # - have to be marked as "n" after commit, but |
|
580 | 577 | # - aren't listed in "repo[node].files()" |
|
581 | 578 | |
|
582 | 579 | lfdirstate = openlfdirstate(repo.ui, repo) |
|
583 | 580 | for f in ctx.files(): |
|
584 | 581 | lfile = splitstandin(f) |
|
585 | 582 | if lfile is not None: |
|
586 | 583 | synclfdirstate(repo, lfdirstate, lfile, False) |
|
587 | 584 | lfdirstate.write() |
|
588 | 585 | |
|
589 | 586 | # As part of committing, copy all of the largefiles into the cache. |
|
590 | 587 | # |
|
591 | 588 | # Using "node" instead of "ctx" implies additional "repo[node]" |
|
592 | 589 | # lookup while copyalltostore(), but can omit redundant check for |
|
593 | 590 | # files coming from the 2nd parent, which should exist in store |
|
594 | 591 | # at merging. |
|
595 | 592 | copyalltostore(repo, node) |
|
596 | 593 | |
|
597 | 594 | |
|
598 | 595 | def getlfilestoupdate(oldstandins, newstandins): |
|
599 | 596 | changedstandins = set(oldstandins).symmetric_difference(set(newstandins)) |
|
600 | 597 | filelist = [] |
|
601 | 598 | for f in changedstandins: |
|
602 | 599 | if f[0] not in filelist: |
|
603 | 600 | filelist.append(f[0]) |
|
604 | 601 | return filelist |
|
605 | 602 | |
|
606 | 603 | |
|
607 | 604 | def getlfilestoupload(repo, missing, addfunc): |
|
608 | 605 | makeprogress = repo.ui.makeprogress |
|
609 | 606 | with makeprogress( |
|
610 | 607 | _(b'finding outgoing largefiles'), |
|
611 | 608 | unit=_(b'revisions'), |
|
612 | 609 | total=len(missing), |
|
613 | 610 | ) as progress: |
|
614 | 611 | for i, n in enumerate(missing): |
|
615 | 612 | progress.update(i) |
|
616 | parents = [p for p in repo[n].parents() if p != nullid] | |
|
613 | parents = [p for p in repo[n].parents() if p != repo.nullid] | |
|
617 | 614 | |
|
618 | 615 | with lfstatus(repo, value=False): |
|
619 | 616 | ctx = repo[n] |
|
620 | 617 | |
|
621 | 618 | files = set(ctx.files()) |
|
622 | 619 | if len(parents) == 2: |
|
623 | 620 | mc = ctx.manifest() |
|
624 | 621 | mp1 = ctx.p1().manifest() |
|
625 | 622 | mp2 = ctx.p2().manifest() |
|
626 | 623 | for f in mp1: |
|
627 | 624 | if f not in mc: |
|
628 | 625 | files.add(f) |
|
629 | 626 | for f in mp2: |
|
630 | 627 | if f not in mc: |
|
631 | 628 | files.add(f) |
|
632 | 629 | for f in mc: |
|
633 | 630 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
634 | 631 | files.add(f) |
|
635 | 632 | for fn in files: |
|
636 | 633 | if isstandin(fn) and fn in ctx: |
|
637 | 634 | addfunc(fn, readasstandin(ctx[fn])) |
|
638 | 635 | |
|
639 | 636 | |
|
640 | 637 | def updatestandinsbymatch(repo, match): |
|
641 | 638 | """Update standins in the working directory according to specified match |
|
642 | 639 | |
|
643 | 640 | This returns (possibly modified) ``match`` object to be used for |
|
644 | 641 | subsequent commit process. |
|
645 | 642 | """ |
|
646 | 643 | |
|
647 | 644 | ui = repo.ui |
|
648 | 645 | |
|
649 | 646 | # Case 1: user calls commit with no specific files or |
|
650 | 647 | # include/exclude patterns: refresh and commit all files that |
|
651 | 648 | # are "dirty". |
|
652 | 649 | if match is None or match.always(): |
|
653 | 650 | # Spend a bit of time here to get a list of files we know |
|
654 | 651 | # are modified so we can compare only against those. |
|
655 | 652 | # It can cost a lot of time (several seconds) |
|
656 | 653 | # otherwise to update all standins if the largefiles are |
|
657 | 654 | # large. |
|
658 | 655 | lfdirstate = openlfdirstate(ui, repo) |
|
659 | 656 | dirtymatch = matchmod.always() |
|
660 | 657 | unsure, s = lfdirstate.status( |
|
661 | 658 | dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False |
|
662 | 659 | ) |
|
663 | 660 | modifiedfiles = unsure + s.modified + s.added + s.removed |
|
664 | 661 | lfiles = listlfiles(repo) |
|
665 | 662 | # this only loops through largefiles that exist (not |
|
666 | 663 | # removed/renamed) |
|
667 | 664 | for lfile in lfiles: |
|
668 | 665 | if lfile in modifiedfiles: |
|
669 | 666 | fstandin = standin(lfile) |
|
670 | 667 | if repo.wvfs.exists(fstandin): |
|
671 | 668 | # this handles the case where a rebase is being |
|
672 | 669 | # performed and the working copy is not updated |
|
673 | 670 | # yet. |
|
674 | 671 | if repo.wvfs.exists(lfile): |
|
675 | 672 | updatestandin(repo, lfile, fstandin) |
|
676 | 673 | |
|
677 | 674 | return match |
|
678 | 675 | |
|
679 | 676 | lfiles = listlfiles(repo) |
|
680 | 677 | match._files = repo._subdirlfs(match.files(), lfiles) |
|
681 | 678 | |
|
682 | 679 | # Case 2: user calls commit with specified patterns: refresh |
|
683 | 680 | # any matching big files. |
|
684 | 681 | smatcher = composestandinmatcher(repo, match) |
|
685 | 682 | standins = repo.dirstate.walk( |
|
686 | 683 | smatcher, subrepos=[], unknown=False, ignored=False |
|
687 | 684 | ) |
|
688 | 685 | |
|
689 | 686 | # No matching big files: get out of the way and pass control to |
|
690 | 687 | # the usual commit() method. |
|
691 | 688 | if not standins: |
|
692 | 689 | return match |
|
693 | 690 | |
|
694 | 691 | # Refresh all matching big files. It's possible that the |
|
695 | 692 | # commit will end up failing, in which case the big files will |
|
696 | 693 | # stay refreshed. No harm done: the user modified them and |
|
697 | 694 | # asked to commit them, so sooner or later we're going to |
|
698 | 695 | # refresh the standins. Might as well leave them refreshed. |
|
699 | 696 | lfdirstate = openlfdirstate(ui, repo) |
|
700 | 697 | for fstandin in standins: |
|
701 | 698 | lfile = splitstandin(fstandin) |
|
702 | 699 | if lfdirstate[lfile] != b'r': |
|
703 | 700 | updatestandin(repo, lfile, fstandin) |
|
704 | 701 | |
|
705 | 702 | # Cook up a new matcher that only matches regular files or |
|
706 | 703 | # standins corresponding to the big files requested by the |
|
707 | 704 | # user. Have to modify _files to prevent commit() from |
|
708 | 705 | # complaining "not tracked" for big files. |
|
709 | 706 | match = copy.copy(match) |
|
710 | 707 | origmatchfn = match.matchfn |
|
711 | 708 | |
|
712 | 709 | # Check both the list of largefiles and the list of |
|
713 | 710 | # standins because if a largefile was removed, it |
|
714 | 711 | # won't be in the list of largefiles at this point |
|
715 | 712 | match._files += sorted(standins) |
|
716 | 713 | |
|
717 | 714 | actualfiles = [] |
|
718 | 715 | for f in match._files: |
|
719 | 716 | fstandin = standin(f) |
|
720 | 717 | |
|
721 | 718 | # For largefiles, only one of the normal and standin should be |
|
722 | 719 | # committed (except if one of them is a remove). In the case of a |
|
723 | 720 | # standin removal, drop the normal file if it is unknown to dirstate. |
|
724 | 721 | # Thus, skip plain largefile names but keep the standin. |
|
725 | 722 | if f in lfiles or fstandin in standins: |
|
726 | 723 | if repo.dirstate[fstandin] != b'r': |
|
727 | 724 | if repo.dirstate[f] != b'r': |
|
728 | 725 | continue |
|
729 | 726 | elif repo.dirstate[f] == b'?': |
|
730 | 727 | continue |
|
731 | 728 | |
|
732 | 729 | actualfiles.append(f) |
|
733 | 730 | match._files = actualfiles |
|
734 | 731 | |
|
735 | 732 | def matchfn(f): |
|
736 | 733 | if origmatchfn(f): |
|
737 | 734 | return f not in lfiles |
|
738 | 735 | else: |
|
739 | 736 | return f in standins |
|
740 | 737 | |
|
741 | 738 | match.matchfn = matchfn |
|
742 | 739 | |
|
743 | 740 | return match |
|
744 | 741 | |
|
745 | 742 | |
|
746 | 743 | class automatedcommithook(object): |
|
747 | 744 | """Stateful hook to update standins at the 1st commit of resuming |
|
748 | 745 | |
|
749 | 746 | For efficiency, updating standins in the working directory should |
|
750 | 747 | be avoided while automated committing (like rebase, transplant and |
|
751 | 748 | so on), because they should be updated before committing. |
|
752 | 749 | |
|
753 | 750 | But the 1st commit of resuming automated committing (e.g. ``rebase |
|
754 | 751 | --continue``) should update them, because largefiles may be |
|
755 | 752 | modified manually. |
|
756 | 753 | """ |
|
757 | 754 | |
|
758 | 755 | def __init__(self, resuming): |
|
759 | 756 | self.resuming = resuming |
|
760 | 757 | |
|
761 | 758 | def __call__(self, repo, match): |
|
762 | 759 | if self.resuming: |
|
763 | 760 | self.resuming = False # avoids updating at subsequent commits |
|
764 | 761 | return updatestandinsbymatch(repo, match) |
|
765 | 762 | else: |
|
766 | 763 | return match |
|
767 | 764 | |
|
768 | 765 | |
|
769 | 766 | def getstatuswriter(ui, repo, forcibly=None): |
|
770 | 767 | """Return the function to write largefiles specific status out |
|
771 | 768 | |
|
772 | 769 | If ``forcibly`` is ``None``, this returns the last element of |
|
773 | 770 | ``repo._lfstatuswriters`` as "default" writer function. |
|
774 | 771 | |
|
775 | 772 | Otherwise, this returns the function to always write out (or |
|
776 | 773 | ignore if ``not forcibly``) status. |
|
777 | 774 | """ |
|
778 | 775 | if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'): |
|
779 | 776 | return repo._lfstatuswriters[-1] |
|
780 | 777 | else: |
|
781 | 778 | if forcibly: |
|
782 | 779 | return ui.status # forcibly WRITE OUT |
|
783 | 780 | else: |
|
784 | 781 | return lambda *msg, **opts: None # forcibly IGNORE |
@@ -1,550 +1,550 @@
|
1 | 1 | # wrapper.py - methods wrapping core mercurial logic |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2017 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import hashlib |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | from mercurial.node import bin, hex, nullid, short |

13 | from mercurial.node import bin, hex, short | |
|
14 | 14 | from mercurial.pycompat import ( |
|
15 | 15 | getattr, |
|
16 | 16 | setattr, |
|
17 | 17 | ) |
|
18 | 18 | |
|
19 | 19 | from mercurial import ( |
|
20 | 20 | bundle2, |
|
21 | 21 | changegroup, |
|
22 | 22 | cmdutil, |
|
23 | 23 | context, |
|
24 | 24 | error, |
|
25 | 25 | exchange, |
|
26 | 26 | exthelper, |
|
27 | 27 | localrepo, |
|
28 | 28 | pycompat, |
|
29 | 29 | revlog, |
|
30 | 30 | scmutil, |
|
31 | 31 | util, |
|
32 | 32 | vfs as vfsmod, |
|
33 | 33 | wireprotov1server, |
|
34 | 34 | ) |
|
35 | 35 | |
|
36 | 36 | from mercurial.upgrade_utils import ( |
|
37 | 37 | actions as upgrade_actions, |
|
38 | 38 | engine as upgrade_engine, |
|
39 | 39 | ) |
|
40 | 40 | |
|
41 | 41 | from mercurial.interfaces import repository |
|
42 | 42 | |
|
43 | 43 | from mercurial.utils import ( |
|
44 | 44 | storageutil, |
|
45 | 45 | stringutil, |
|
46 | 46 | ) |
|
47 | 47 | |
|
48 | 48 | from ..largefiles import lfutil |
|
49 | 49 | |
|
50 | 50 | from . import ( |
|
51 | 51 | blobstore, |
|
52 | 52 | pointer, |
|
53 | 53 | ) |
|
54 | 54 | |
|
55 | 55 | eh = exthelper.exthelper() |
|
56 | 56 | |
|
57 | 57 | |
|
58 | 58 | @eh.wrapfunction(localrepo, b'makefilestorage') |
|
59 | 59 | def localrepomakefilestorage(orig, requirements, features, **kwargs): |
|
60 | 60 | if b'lfs' in requirements: |
|
61 | 61 | features.add(repository.REPO_FEATURE_LFS) |
|
62 | 62 | |
|
63 | 63 | return orig(requirements=requirements, features=features, **kwargs) |
|
64 | 64 | |
|
65 | 65 | |
|
66 | 66 | @eh.wrapfunction(changegroup, b'allsupportedversions') |
|
67 | 67 | def allsupportedversions(orig, ui): |
|
68 | 68 | versions = orig(ui) |
|
69 | 69 | versions.add(b'03') |
|
70 | 70 | return versions |
|
71 | 71 | |
|
72 | 72 | |
|
73 | 73 | @eh.wrapfunction(wireprotov1server, b'_capabilities') |
|
74 | 74 | def _capabilities(orig, repo, proto): |
|
75 | 75 | '''Wrap server command to announce lfs server capability''' |
|
76 | 76 | caps = orig(repo, proto) |
|
77 | 77 | if util.safehasattr(repo.svfs, b'lfslocalblobstore'): |
|
78 | 78 | # Advertise a slightly different capability when lfs is *required*, so |
|
79 | 79 | # that the client knows it MUST load the extension. If lfs is not |
|
80 | 80 | # required on the server, there's no reason to autoload the extension |
|
81 | 81 | # on the client. |
|
82 | 82 | if b'lfs' in repo.requirements: |
|
83 | 83 | caps.append(b'lfs-serve') |
|
84 | 84 | |
|
85 | 85 | caps.append(b'lfs') |
|
86 | 86 | return caps |
|
87 | 87 | |
|
88 | 88 | |
|
89 | 89 | def bypasscheckhash(self, text): |
|
90 | 90 | return False |
|
91 | 91 | |
|
92 | 92 | |
|
93 | 93 | def readfromstore(self, text): |
|
94 | 94 | """Read filelog content from local blobstore transform for flagprocessor. |
|
95 | 95 | |
|
96 | 96 | Default transform for flagprocessor, returning contents from blobstore. |
|
97 | 97 | Returns a 2-tuple (text, validatehash) where validatehash is True as the |
|
98 | 98 | contents of the blobstore should be checked using checkhash. |
|
99 | 99 | """ |
|
100 | 100 | p = pointer.deserialize(text) |
|
101 | 101 | oid = p.oid() |
|
102 | 102 | store = self.opener.lfslocalblobstore |
|
103 | 103 | if not store.has(oid): |
|
104 | 104 | p.filename = self.filename |
|
105 | 105 | self.opener.lfsremoteblobstore.readbatch([p], store) |
|
106 | 106 | |
|
107 | 107 | # The caller will validate the content |
|
108 | 108 | text = store.read(oid, verify=False) |
|
109 | 109 | |
|
110 | 110 | # pack hg filelog metadata |
|
111 | 111 | hgmeta = {} |
|
112 | 112 | for k in p.keys(): |
|
113 | 113 | if k.startswith(b'x-hg-'): |
|
114 | 114 | name = k[len(b'x-hg-') :] |
|
115 | 115 | hgmeta[name] = p[k] |
|
116 | 116 | if hgmeta or text.startswith(b'\1\n'): |
|
117 | 117 | text = storageutil.packmeta(hgmeta, text) |
|
118 | 118 | |
|
119 | 119 | return (text, True) |
|
120 | 120 | |
|
121 | 121 | |
|
122 | 122 | def writetostore(self, text): |
|
123 | 123 | # hg filelog metadata (includes rename, etc) |
|
124 | 124 | hgmeta, offset = storageutil.parsemeta(text) |
|
125 | 125 | if offset and offset > 0: |
|
126 | 126 | # lfs blob does not contain hg filelog metadata |
|
127 | 127 | text = text[offset:] |
|
128 | 128 | |
|
129 | 129 | # git-lfs only supports sha256 |
|
130 | 130 | oid = hex(hashlib.sha256(text).digest()) |
|
131 | 131 | self.opener.lfslocalblobstore.write(oid, text) |
|
132 | 132 | |
|
133 | 133 | # replace contents with metadata |
|
134 | 134 | longoid = b'sha256:%s' % oid |
|
135 | 135 | metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text)) |
|
136 | 136 | |
|
137 | 137 | # by default, we expect the content to be binary. however, LFS could also |
|
138 | 138 | # be used for non-binary content. add a special entry for non-binary data. |
|
139 | 139 | # this will be used by filectx.isbinary(). |
|
140 | 140 | if not stringutil.binary(text): |
|
141 | 141 | # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix |
|
142 | 142 | metadata[b'x-is-binary'] = b'0' |
|
143 | 143 | |
|
144 | 144 | # translate hg filelog metadata to lfs metadata with "x-hg-" prefix |
|
145 | 145 | if hgmeta is not None: |
|
146 | 146 | for k, v in pycompat.iteritems(hgmeta): |
|
147 | 147 | metadata[b'x-hg-%s' % k] = v |
|
148 | 148 | |
|
149 | 149 | rawtext = metadata.serialize() |
|
150 | 150 | return (rawtext, False) |
|
151 | 151 | |
|
152 | 152 | |
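A pure-Python restatement of the ``x-hg-`` key translation writetostore()
performs; the keys mirror the hg filelog metadata read back by
filelogrenamed() below, while the values are made up::

    hgmeta = {b'copy': b'old/name', b'copyrev': b'0' * 40}
    lfsmeta = {b'x-hg-' + k: v for k, v in hgmeta.items()}
    assert lfsmeta[b'x-hg-copy'] == b'old/name'
    # readfromstore() reverses this by stripping the b'x-hg-' prefix
    assert {k[len(b'x-hg-'):]: v for k, v in lfsmeta.items()} == hgmeta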
|
153 | 153 | def _islfs(rlog, node=None, rev=None): |
|
154 | 154 | if rev is None: |
|
155 | 155 | if node is None: |
|
156 | 156 | # both None - likely working copy content where node is not ready |
|
157 | 157 | return False |
|
158 | 158 | rev = rlog.rev(node) |
|
159 | 159 | else: |
|
160 | 160 | node = rlog.node(rev) |
|
161 | if node == nullid: | |
|
161 | if node == rlog.nullid: | |
|
162 | 162 | return False |
|
163 | 163 | flags = rlog.flags(rev) |
|
164 | 164 | return bool(flags & revlog.REVIDX_EXTSTORED) |
|
165 | 165 | |
|
166 | 166 | |
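The EXTSTORED check in _islfs() is a plain bitmask test on the revision
flags. A self-contained sketch, using a hypothetical stand-in value for
revlog.REVIDX_EXTSTORED::

    REVIDX_EXTSTORED = 1 << 13  # hypothetical value, for illustration only

    flags = 0
    flags |= REVIDX_EXTSTORED  # set by filelogaddrevision() for tracked files
    assert bool(flags & REVIDX_EXTSTORED)
    assert not bool(0 & REVIDX_EXTSTORED)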
|
167 | 167 | # Wrapping may also be applied by remotefilelog |
|
168 | 168 | def filelogaddrevision( |
|
169 | 169 | orig, |
|
170 | 170 | self, |
|
171 | 171 | text, |
|
172 | 172 | transaction, |
|
173 | 173 | link, |
|
174 | 174 | p1, |
|
175 | 175 | p2, |
|
176 | 176 | cachedelta=None, |
|
177 | 177 | node=None, |
|
178 | 178 | flags=revlog.REVIDX_DEFAULT_FLAGS, |
|
179 | 179 | **kwds |
|
180 | 180 | ): |
|
181 | 181 | # The matcher isn't available if reposetup() wasn't called. |
|
182 | 182 | lfstrack = self._revlog.opener.options.get(b'lfstrack') |
|
183 | 183 | |
|
184 | 184 | if lfstrack: |
|
185 | 185 | textlen = len(text) |
|
186 | 186 | # exclude hg rename meta from file size |
|
187 | 187 | meta, offset = storageutil.parsemeta(text) |
|
188 | 188 | if offset: |
|
189 | 189 | textlen -= offset |
|
190 | 190 | |
|
191 | 191 | if lfstrack(self._revlog.filename, textlen): |
|
192 | 192 | flags |= revlog.REVIDX_EXTSTORED |
|
193 | 193 | |
|
194 | 194 | return orig( |
|
195 | 195 | self, |
|
196 | 196 | text, |
|
197 | 197 | transaction, |
|
198 | 198 | link, |
|
199 | 199 | p1, |
|
200 | 200 | p2, |
|
201 | 201 | cachedelta=cachedelta, |
|
202 | 202 | node=node, |
|
203 | 203 | flags=flags, |
|
204 | 204 | **kwds |
|
205 | 205 | ) |
|
206 | 206 | |
|
207 | 207 | |
|
208 | 208 | # Wrapping may also be applied by remotefilelog |
|
209 | 209 | def filelogrenamed(orig, self, node): |
|
210 | 210 | if _islfs(self._revlog, node): |
|
211 | 211 | rawtext = self._revlog.rawdata(node) |
|
212 | 212 | if not rawtext: |
|
213 | 213 | return False |
|
214 | 214 | metadata = pointer.deserialize(rawtext) |
|
215 | 215 | if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata: |
|
216 | 216 | return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev']) |
|
217 | 217 | else: |
|
218 | 218 | return False |
|
219 | 219 | return orig(self, node) |
|
220 | 220 | |
|
221 | 221 | |
|
222 | 222 | # Wrapping may also be applied by remotefilelog |
|
223 | 223 | def filelogsize(orig, self, rev): |
|
224 | 224 | if _islfs(self._revlog, rev=rev): |
|
225 | 225 | # fast path: use lfs metadata to answer size |
|
226 | 226 | rawtext = self._revlog.rawdata(rev) |
|
227 | 227 | metadata = pointer.deserialize(rawtext) |
|
228 | 228 | return int(metadata[b'size']) |
|
229 | 229 | return orig(self, rev) |
|
230 | 230 | |
|
231 | 231 | |
|
232 | 232 | @eh.wrapfunction(revlog, b'_verify_revision') |
|
233 | 233 | def _verify_revision(orig, rl, skipflags, state, node): |
|
234 | 234 | if _islfs(rl, node=node): |
|
235 | 235 | rawtext = rl.rawdata(node) |
|
236 | 236 | metadata = pointer.deserialize(rawtext) |
|
237 | 237 | |
|
238 | 238 | # Don't skip blobs that are stored locally, as local verification is |
|
239 | 239 | # relatively cheap and there's no other way to verify the raw data in |
|
240 | 240 | # the revlog. |
|
241 | 241 | if rl.opener.lfslocalblobstore.has(metadata.oid()): |
|
242 | 242 | skipflags &= ~revlog.REVIDX_EXTSTORED |
|
243 | 243 | elif skipflags & revlog.REVIDX_EXTSTORED: |
|
244 | 244 | # The wrapped method will set `skipread`, but there's enough local |
|
245 | 245 | # info to check renames. |
|
246 | 246 | state[b'safe_renamed'].add(node) |
|
247 | 247 | |
|
248 | 248 | orig(rl, skipflags, state, node) |
|
249 | 249 | |
|
250 | 250 | |
|
251 | 251 | @eh.wrapfunction(context.basefilectx, b'cmp') |
|
252 | 252 | def filectxcmp(orig, self, fctx): |
|
253 | 253 | """returns True if text is different than fctx""" |
|
254 | 254 | # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs |
|
255 | 255 | if self.islfs() and getattr(fctx, 'islfs', lambda: False)(): |
|
256 | 256 | # fast path: check LFS oid |
|
257 | 257 | p1 = pointer.deserialize(self.rawdata()) |
|
258 | 258 | p2 = pointer.deserialize(fctx.rawdata()) |
|
259 | 259 | return p1.oid() != p2.oid() |
|
260 | 260 | return orig(self, fctx) |
|
261 | 261 | |
|
262 | 262 | |
|
263 | 263 | @eh.wrapfunction(context.basefilectx, b'isbinary') |
|
264 | 264 | def filectxisbinary(orig, self): |
|
265 | 265 | if self.islfs(): |
|
266 | 266 | # fast path: use lfs metadata to answer isbinary |
|
267 | 267 | metadata = pointer.deserialize(self.rawdata()) |
|
268 | 268 | # if lfs metadata says nothing, assume it's binary by default |
|
269 | 269 | return bool(int(metadata.get(b'x-is-binary', 1))) |
|
270 | 270 | return orig(self) |
|
271 | 271 | |
|
272 | 272 | |
|
273 | 273 | def filectxislfs(self): |
|
274 | 274 | return _islfs(self.filelog()._revlog, self.filenode()) |
|
275 | 275 | |
|
276 | 276 | |
|
277 | 277 | @eh.wrapfunction(cmdutil, b'_updatecatformatter') |
|
278 | 278 | def _updatecatformatter(orig, fm, ctx, matcher, path, decode): |
|
279 | 279 | orig(fm, ctx, matcher, path, decode) |
|
280 | 280 | fm.data(rawdata=ctx[path].rawdata()) |
|
281 | 281 | |
|
282 | 282 | |
|
283 | 283 | @eh.wrapfunction(scmutil, b'wrapconvertsink') |
|
284 | 284 | def convertsink(orig, sink): |
|
285 | 285 | sink = orig(sink) |
|
286 | 286 | if sink.repotype == b'hg': |
|
287 | 287 | |
|
288 | 288 | class lfssink(sink.__class__): |
|
289 | 289 | def putcommit( |
|
290 | 290 | self, |
|
291 | 291 | files, |
|
292 | 292 | copies, |
|
293 | 293 | parents, |
|
294 | 294 | commit, |
|
295 | 295 | source, |
|
296 | 296 | revmap, |
|
297 | 297 | full, |
|
298 | 298 | cleanp2, |
|
299 | 299 | ): |
|
300 | 300 | pc = super(lfssink, self).putcommit |
|
301 | 301 | node = pc( |
|
302 | 302 | files, |
|
303 | 303 | copies, |
|
304 | 304 | parents, |
|
305 | 305 | commit, |
|
306 | 306 | source, |
|
307 | 307 | revmap, |
|
308 | 308 | full, |
|
309 | 309 | cleanp2, |
|
310 | 310 | ) |
|
311 | 311 | |
|
312 | 312 | if b'lfs' not in self.repo.requirements: |
|
313 | 313 | ctx = self.repo[node] |
|
314 | 314 | |
|
315 | 315 | # The file list may contain removed files, so check for |
|
316 | 316 | # membership before assuming it is in the context. |
|
317 | 317 | if any(f in ctx and ctx[f].islfs() for f, n in files): |
|
318 | 318 | self.repo.requirements.add(b'lfs') |
|
319 | 319 | scmutil.writereporequirements(self.repo) |
|
320 | 320 | |
|
321 | 321 | return node |
|
322 | 322 | |
|
323 | 323 | sink.__class__ = lfssink |
|
324 | 324 | |
|
325 | 325 | return sink |
|
326 | 326 | |
|
327 | 327 | |
|
328 | 328 | # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs |
|
329 | 329 | # options and blob stores are passed from othervfs to the new readonlyvfs. |
|
330 | 330 | @eh.wrapfunction(vfsmod.readonlyvfs, b'__init__') |
|
331 | 331 | def vfsinit(orig, self, othervfs): |
|
332 | 332 | orig(self, othervfs) |
|
333 | 333 | # copy lfs related options |
|
334 | 334 | for k, v in othervfs.options.items(): |
|
335 | 335 | if k.startswith(b'lfs'): |
|
336 | 336 | self.options[k] = v |
|
337 | 337 | # also copy lfs blobstores. note: this can run before reposetup, so lfs |
|
338 | 338 | # blobstore attributes are not always ready at this time. |
|
339 | 339 | for name in [b'lfslocalblobstore', b'lfsremoteblobstore']: |
|
340 | 340 | if util.safehasattr(othervfs, name): |
|
341 | 341 | setattr(self, name, getattr(othervfs, name)) |
|
342 | 342 | |
|
343 | 343 | |
|
344 | 344 | def _prefetchfiles(repo, revmatches): |
|
345 | 345 | """Ensure that required LFS blobs are present, fetching them as a group if |
|
346 | 346 | needed.""" |
|
347 | 347 | if not util.safehasattr(repo.svfs, b'lfslocalblobstore'): |
|
348 | 348 | return |
|
349 | 349 | |
|
350 | 350 | pointers = [] |
|
351 | 351 | oids = set() |
|
352 | 352 | localstore = repo.svfs.lfslocalblobstore |
|
353 | 353 | |
|
354 | 354 | for rev, match in revmatches: |
|
355 | 355 | ctx = repo[rev] |
|
356 | 356 | for f in ctx.walk(match): |
|
357 | 357 | p = pointerfromctx(ctx, f) |
|
358 | 358 | if p and p.oid() not in oids and not localstore.has(p.oid()): |
|
359 | 359 | p.filename = f |
|
360 | 360 | pointers.append(p) |
|
361 | 361 | oids.add(p.oid()) |
|
362 | 362 | |
|
363 | 363 | if pointers: |
|
364 | 364 | # Recalculating the repo store here allows 'paths.default' that is set |
|
365 | 365 | # on the repo by a clone command to be used for the update. |
|
366 | 366 | blobstore.remote(repo).readbatch(pointers, localstore) |
|
367 | 367 | |
|
368 | 368 | |
|
369 | 369 | def _canskipupload(repo): |
|
370 | 370 | # Skip if this hasn't been passed to reposetup() |
|
371 | 371 | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): |
|
372 | 372 | return True |
|
373 | 373 | |
|
374 | 374 | # if remotestore is a null store, upload is a no-op and can be skipped |
|
375 | 375 | return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) |
|
376 | 376 | |
|
377 | 377 | |
|
378 | 378 | def candownload(repo): |
|
379 | 379 | # Skip if this hasn't been passed to reposetup() |
|
380 | 380 | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): |
|
381 | 381 | return False |
|
382 | 382 | |
|
383 | 383 | # if remotestore is a null store, downloads will lead to nothing |
|
384 | 384 | return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) |
|
385 | 385 | |
|
386 | 386 | |
|
387 | 387 | def uploadblobsfromrevs(repo, revs): |
|
388 | 388 | """upload lfs blobs introduced by revs |
|
389 | 389 | |
|
390 | 390 | Note: also used by other extensions, e.g. infinitepush. Avoid renaming.
|
391 | 391 | """ |
|
392 | 392 | if _canskipupload(repo): |
|
393 | 393 | return |
|
394 | 394 | pointers = extractpointers(repo, revs) |
|
395 | 395 | uploadblobs(repo, pointers) |
|
396 | 396 | |
|
397 | 397 | |
|
398 | 398 | def prepush(pushop): |
|
399 | 399 | """Prepush hook. |
|
400 | 400 | |
|
401 | 401 | Read through the revisions to push, looking for filelog entries that can be |
|
402 | 402 | deserialized into metadata so that we can block the push on their upload to |
|
403 | 403 | the remote blobstore. |
|
404 | 404 | """ |
|
405 | 405 | return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) |
|
406 | 406 | |
|
407 | 407 | |
|
408 | 408 | @eh.wrapfunction(exchange, b'push') |
|
409 | 409 | def push(orig, repo, remote, *args, **kwargs): |
|
410 | 410 | """bail on push if the extension isn't enabled on remote when needed, and |
|
411 | 411 | update the remote store based on the destination path.""" |
|
412 | 412 | if b'lfs' in repo.requirements: |
|
413 | 413 | # If the remote peer is for a local repo, the requirement tests in the |
|
414 | 414 | # base class method enforce lfs support. Otherwise, some revisions in |
|
415 | 415 | # this repo use lfs, and the remote repo needs the extension loaded. |
|
416 | 416 | if not remote.local() and not remote.capable(b'lfs'): |
|
417 | 417 | # This is a copy of the message in exchange.push() when requirements |
|
418 | 418 | # are missing between local repos. |
|
419 | 419 | m = _(b"required features are not supported in the destination: %s") |
|
420 | 420 | raise error.Abort( |
|
421 | 421 | m % b'lfs', hint=_(b'enable the lfs extension on the server') |
|
422 | 422 | ) |
|
423 | 423 | |
|
424 | 424 | # Repositories where this extension is disabled won't have the field. |
|
425 | 425 | # But if there's a requirement, then the extension must be loaded AND |
|
426 | 426 | # there may be blobs to push. |
|
427 | 427 | remotestore = repo.svfs.lfsremoteblobstore |
|
428 | 428 | try: |
|
429 | 429 | repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url()) |
|
430 | 430 | return orig(repo, remote, *args, **kwargs) |
|
431 | 431 | finally: |
|
432 | 432 | repo.svfs.lfsremoteblobstore = remotestore |
|
433 | 433 | else: |
|
434 | 434 | return orig(repo, remote, *args, **kwargs) |
|
435 | 435 | |
|
436 | 436 | |
|
437 | 437 | # when writing a bundle via "hg bundle" command, upload related LFS blobs |
|
438 | 438 | @eh.wrapfunction(bundle2, b'writenewbundle') |
|
439 | 439 | def writenewbundle( |
|
440 | 440 | orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs |
|
441 | 441 | ): |
|
442 | 442 | """upload LFS blobs added by outgoing revisions on 'hg bundle'""" |
|
443 | 443 | uploadblobsfromrevs(repo, outgoing.missing) |
|
444 | 444 | return orig( |
|
445 | 445 | ui, repo, source, filename, bundletype, outgoing, *args, **kwargs |
|
446 | 446 | ) |
|
447 | 447 | |
|
448 | 448 | |
|
449 | 449 | def extractpointers(repo, revs): |
|
450 | 450 | """return a list of lfs pointers added by given revs""" |
|
451 | 451 | repo.ui.debug(b'lfs: computing set of blobs to upload\n') |
|
452 | 452 | pointers = {} |
|
453 | 453 | |
|
454 | 454 | makeprogress = repo.ui.makeprogress |
|
455 | 455 | with makeprogress( |
|
456 | 456 | _(b'lfs search'), _(b'changesets'), len(revs) |
|
457 | 457 | ) as progress: |
|
458 | 458 | for r in revs: |
|
459 | 459 | ctx = repo[r] |
|
460 | 460 | for p in pointersfromctx(ctx).values(): |
|
461 | 461 | pointers[p.oid()] = p |
|
462 | 462 | progress.increment() |
|
463 | 463 | return sorted(pointers.values(), key=lambda p: p.oid()) |
|
464 | 464 | |
|
465 | 465 | |
|
466 | 466 | def pointerfromctx(ctx, f, removed=False): |
|
467 | 467 | """return a pointer for the named file from the given changectx, or None if |
|
468 | 468 | the file isn't LFS. |
|
469 | 469 | |
|
470 | 470 | Optionally, the pointer for a file deleted from the context can be returned. |
|
471 | 471 | Since no such pointer is actually stored, and to distinguish from a non-LFS
|
472 | 472 | file, this pointer is represented by an empty dict. |
|
473 | 473 | """ |
|
474 | 474 | _ctx = ctx |
|
475 | 475 | if f not in ctx: |
|
476 | 476 | if not removed: |
|
477 | 477 | return None |
|
478 | 478 | if f in ctx.p1(): |
|
479 | 479 | _ctx = ctx.p1() |
|
480 | 480 | elif f in ctx.p2(): |
|
481 | 481 | _ctx = ctx.p2() |
|
482 | 482 | else: |
|
483 | 483 | return None |
|
484 | 484 | fctx = _ctx[f] |
|
485 | 485 | if not _islfs(fctx.filelog()._revlog, fctx.filenode()): |
|
486 | 486 | return None |
|
487 | 487 | try: |
|
488 | 488 | p = pointer.deserialize(fctx.rawdata()) |
|
489 | 489 | if ctx == _ctx: |
|
490 | 490 | return p |
|
491 | 491 | return {} |
|
492 | 492 | except pointer.InvalidPointer as ex: |
|
493 | 493 | raise error.Abort( |
|
494 | 494 | _(b'lfs: corrupted pointer (%s@%s): %s\n') |
|
495 | 495 | % (f, short(_ctx.node()), ex) |
|
496 | 496 | ) |
|
497 | 497 | |
|
498 | 498 | |
|
499 | 499 | def pointersfromctx(ctx, removed=False): |
|
500 | 500 | """return a dict {path: pointer} for given single changectx. |
|
501 | 501 | |
|
502 | 502 | If ``removed`` == True and the LFS file was removed from ``ctx``, the value |
|
503 | 503 | stored for the path is an empty dict. |
|
504 | 504 | """ |
|
505 | 505 | result = {} |
|
506 | 506 | m = ctx.repo().narrowmatch() |
|
507 | 507 | |
|
508 | 508 | # TODO: consider manifest.fastread() instead |
|
509 | 509 | for f in ctx.files(): |
|
510 | 510 | if not m(f): |
|
511 | 511 | continue |
|
512 | 512 | p = pointerfromctx(ctx, f, removed=removed) |
|
513 | 513 | if p is not None: |
|
514 | 514 | result[f] = p |
|
515 | 515 | return result |
|
516 | 516 | |
|
517 | 517 | |
|
518 | 518 | def uploadblobs(repo, pointers): |
|
519 | 519 | """upload given pointers from local blobstore""" |
|
520 | 520 | if not pointers: |
|
521 | 521 | return |
|
522 | 522 | |
|
523 | 523 | remoteblob = repo.svfs.lfsremoteblobstore |
|
524 | 524 | remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) |
|
525 | 525 | |
|
526 | 526 | |
|
527 | 527 | @eh.wrapfunction(upgrade_engine, b'finishdatamigration') |
|
528 | 528 | def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements): |
|
529 | 529 | orig(ui, srcrepo, dstrepo, requirements) |
|
530 | 530 | |
|
531 | 531 | # Skip if this hasn't been passed to reposetup() |
|
532 | 532 | if util.safehasattr( |
|
533 | 533 | srcrepo.svfs, b'lfslocalblobstore' |
|
534 | 534 | ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'): |
|
535 | 535 | srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs |
|
536 | 536 | dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs |
|
537 | 537 | |
|
538 | 538 | for dirpath, dirs, files in srclfsvfs.walk(): |
|
539 | 539 | for oid in files: |
|
540 | 540 | ui.write(_(b'copying lfs blob %s\n') % oid) |
|
541 | 541 | lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid)) |
|
542 | 542 | |
|
543 | 543 | |
|
544 | 544 | @eh.wrapfunction(upgrade_actions, b'preservedrequirements') |
|
545 | 545 | @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements') |
|
546 | 546 | def upgraderequirements(orig, repo): |
|
547 | 547 | reqs = orig(repo) |
|
548 | 548 | if b'lfs' in repo.requirements: |
|
549 | 549 | reqs.add(b'lfs') |
|
550 | 550 | return reqs |
@@ -1,4315 +1,4314 b'' | |||
|
1 | 1 | # mq.py - patch queues for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''manage a stack of patches |
|
9 | 9 | |
|
10 | 10 | This extension lets you work with a stack of patches in a Mercurial |
|
11 | 11 | repository. It manages two stacks of patches - all known patches, and |
|
12 | 12 | applied patches (subset of known patches). |
|
13 | 13 | |
|
14 | 14 | Known patches are represented as patch files in the .hg/patches |
|
15 | 15 | directory. Applied patches are both patch files and changesets. |
|
16 | 16 | |
|
17 | 17 | Common tasks (use :hg:`help COMMAND` for more details):: |
|
18 | 18 | |
|
19 | 19 | create new patch qnew |
|
20 | 20 | import existing patch qimport |
|
21 | 21 | |
|
22 | 22 | print patch series qseries |
|
23 | 23 | print applied patches qapplied |
|
24 | 24 | |
|
25 | 25 | add known patch to applied stack qpush |
|
26 | 26 | remove patch from applied stack qpop |
|
27 | 27 | refresh contents of top applied patch qrefresh |
|
28 | 28 | |
|
29 | 29 | By default, mq will automatically use git patches when required to |
|
30 | 30 | avoid losing file mode changes, copy records, binary files or empty |
|
31 | 31 | files creations or deletions. This behavior can be configured with:: |
|
32 | 32 | |
|
33 | 33 | [mq] |
|
34 | 34 | git = auto/keep/yes/no |
|
35 | 35 | |
|
36 | 36 | If set to 'keep', mq will obey the [diff] section configuration while |
|
37 | 37 | preserving existing git patches upon qrefresh. If set to 'yes' or |
|
38 | 38 | 'no', mq will override the [diff] section and always generate git or |
|
39 | 39 | regular patches, possibly losing data in the second case. |
|
40 | 40 | |
|
41 | 41 | It may be desirable for mq changesets to be kept in the secret phase (see |
|
42 | 42 | :hg:`help phases`), which can be enabled with the following setting:: |
|
43 | 43 | |
|
44 | 44 | [mq] |
|
45 | 45 | secret = True |
|
46 | 46 | |
|
47 | 47 | You will by default be managing a patch queue named "patches". You can |
|
48 | 48 | create other, independent patch queues with the :hg:`qqueue` command. |
|
49 | 49 | |
|
50 | 50 | If the working directory contains uncommitted files, qpush, qpop and |
|
51 | 51 | qgoto abort immediately. If -f/--force is used, the changes are |
|
52 | 52 | discarded. Setting:: |
|
53 | 53 | |
|
54 | 54 | [mq] |
|
55 | 55 | keepchanges = True |
|
56 | 56 | |
|
57 | 57 | make them behave as if --keep-changes were passed, and non-conflicting |
|
58 | 58 | local changes will be tolerated and preserved. If incompatible options |
|
59 | 59 | such as -f/--force or --exact are passed, this setting is ignored. |
|
60 | 60 | |
|
61 | 61 | This extension used to provide a strip command. This command now lives |
|
62 | 62 | in the strip extension. |
|
63 | 63 | ''' |
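The settings documented above combine in a single hgrc section; a sample
configuration (values are illustrative, not recommendations)::

    [mq]
    # preserve existing git patches on qrefresh
    git = keep
    # keep mq changesets in the secret phase
    secret = True
    # behave as if --keep-changes were passed
    keepchanges = True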
|
64 | 64 | |
|
65 | 65 | from __future__ import absolute_import, print_function |
|
66 | 66 | |
|
67 | 67 | import errno |
|
68 | 68 | import os |
|
69 | 69 | import re |
|
70 | 70 | import shutil |
|
71 | 71 | import sys |
|
72 | 72 | from mercurial.i18n import _ |
|
73 | 73 | from mercurial.node import ( |
|
74 | 74 | bin, |
|
75 | 75 | hex, |
|
76 | nullid, | |
|
77 | 76 | nullrev, |
|
78 | 77 | short, |
|
79 | 78 | ) |
|
80 | 79 | from mercurial.pycompat import ( |
|
81 | 80 | delattr, |
|
82 | 81 | getattr, |
|
83 | 82 | open, |
|
84 | 83 | ) |
|
85 | 84 | from mercurial import ( |
|
86 | 85 | cmdutil, |
|
87 | 86 | commands, |
|
88 | 87 | dirstateguard, |
|
89 | 88 | encoding, |
|
90 | 89 | error, |
|
91 | 90 | extensions, |
|
92 | 91 | hg, |
|
93 | 92 | localrepo, |
|
94 | 93 | lock as lockmod, |
|
95 | 94 | logcmdutil, |
|
96 | 95 | patch as patchmod, |
|
97 | 96 | phases, |
|
98 | 97 | pycompat, |
|
99 | 98 | registrar, |
|
100 | 99 | revsetlang, |
|
101 | 100 | scmutil, |
|
102 | 101 | smartset, |
|
103 | 102 | strip, |
|
104 | 103 | subrepoutil, |
|
105 | 104 | util, |
|
106 | 105 | vfs as vfsmod, |
|
107 | 106 | ) |
|
108 | 107 | from mercurial.utils import ( |
|
109 | 108 | dateutil, |
|
110 | 109 | stringutil, |
|
111 | 110 | urlutil, |
|
112 | 111 | ) |
|
113 | 112 | |
|
114 | 113 | release = lockmod.release |
|
115 | 114 | seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))] |
|
116 | 115 | |
|
117 | 116 | cmdtable = {} |
|
118 | 117 | command = registrar.command(cmdtable) |
|
119 | 118 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
120 | 119 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
121 | 120 | # be specifying the version(s) of Mercurial they are tested with, or |
|
122 | 121 | # leave the attribute unspecified. |
|
123 | 122 | testedwith = b'ships-with-hg-core' |
|
124 | 123 | |
|
125 | 124 | configtable = {} |
|
126 | 125 | configitem = registrar.configitem(configtable) |
|
127 | 126 | |
|
128 | 127 | configitem( |
|
129 | 128 | b'mq', |
|
130 | 129 | b'git', |
|
131 | 130 | default=b'auto', |
|
132 | 131 | ) |
|
133 | 132 | configitem( |
|
134 | 133 | b'mq', |
|
135 | 134 | b'keepchanges', |
|
136 | 135 | default=False, |
|
137 | 136 | ) |
|
138 | 137 | configitem( |
|
139 | 138 | b'mq', |
|
140 | 139 | b'plain', |
|
141 | 140 | default=False, |
|
142 | 141 | ) |
|
143 | 142 | configitem( |
|
144 | 143 | b'mq', |
|
145 | 144 | b'secret', |
|
146 | 145 | default=False, |
|
147 | 146 | ) |
|
148 | 147 | |
|
149 | 148 | # force load strip extension formerly included in mq and import some utility |
|
150 | 149 | try: |
|
151 | 150 | extensions.find(b'strip') |
|
152 | 151 | except KeyError: |
|
153 | 152 | # note: load is lazy so we could avoid the try-except, |
|
154 | 153 | # but I (marmoute) prefer this explicit code. |
|
155 | 154 | class dummyui(object): |
|
156 | 155 | def debug(self, msg): |
|
157 | 156 | pass |
|
158 | 157 | |
|
159 | 158 | def log(self, event, msgfmt, *msgargs, **opts): |
|
160 | 159 | pass |
|
161 | 160 | |
|
162 | 161 | extensions.load(dummyui(), b'strip', b'') |
|
163 | 162 | |
|
164 | 163 | strip = strip.strip |
|
165 | 164 | |
|
166 | 165 | |
|
167 | 166 | def checksubstate(repo, baserev=None): |
|
168 | 167 | """return list of subrepos at a different revision than substate. |
|
169 | 168 | Abort if any subrepos have uncommitted changes.""" |
|
170 | 169 | inclsubs = [] |
|
171 | 170 | wctx = repo[None] |
|
172 | 171 | if baserev: |
|
173 | 172 | bctx = repo[baserev] |
|
174 | 173 | else: |
|
175 | 174 | bctx = wctx.p1() |
|
176 | 175 | for s in sorted(wctx.substate): |
|
177 | 176 | wctx.sub(s).bailifchanged(True) |
|
178 | 177 | if s not in bctx.substate or bctx.sub(s).dirty(): |
|
179 | 178 | inclsubs.append(s) |
|
180 | 179 | return inclsubs |
|
181 | 180 | |
|
182 | 181 | |
|
183 | 182 | # Patch names look like unix-file names.

184 | 183 | # They must be joinable with the queue directory and result in the patch path.
|
185 | 184 | normname = util.normpath |
|
186 | 185 | |
|
187 | 186 | |
|
188 | 187 | class statusentry(object): |
|
189 | 188 | def __init__(self, node, name): |
|
190 | 189 | self.node, self.name = node, name |
|
191 | 190 | |
|
192 | 191 | def __bytes__(self): |
|
193 | 192 | return hex(self.node) + b':' + self.name |
|
194 | 193 | |
|
195 | 194 | __str__ = encoding.strmethod(__bytes__) |
|
196 | 195 | __repr__ = encoding.strmethod(__bytes__) |
|
197 | 196 | |
|
198 | 197 | |
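statusentry.__bytes__() above fixes the on-disk format of each line in the
status file: ``<hex node>:<patch name>``. A minimal parsing sketch with a
made-up node value::

    line = b'a' * 40 + b':fix-crash.diff'
    node_hex, name = line.split(b':', 1)
    assert len(node_hex) == 40 and name == b'fix-crash.diff'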
|
199 | 198 | # The order of the headers in 'hg export' HG patches: |
|
200 | 199 | HGHEADERS = [ |
|
201 | 200 | # '# HG changeset patch', |
|
202 | 201 | b'# User ', |
|
203 | 202 | b'# Date ', |
|
204 | 203 | b'# ', |
|
205 | 204 | b'# Branch ', |
|
206 | 205 | b'# Node ID ', |
|
207 | 206 | b'# Parent ', # can occur twice for merges - but that is not relevant for mq |
|
208 | 207 | ] |
|
209 | 208 | # The order of headers in plain 'mail style' patches: |
|
210 | 209 | PLAINHEADERS = { |
|
211 | 210 | b'from': 0, |
|
212 | 211 | b'date': 1, |
|
213 | 212 | b'subject': 2, |
|
214 | 213 | } |
|
215 | 214 | |
|
216 | 215 | |
|
217 | 216 | def inserthgheader(lines, header, value): |
|
218 | 217 | """Assuming lines contains a HG patch header, add a header line with value. |
|
219 | 218 | >>> try: inserthgheader([], b'# Date ', b'z') |
|
220 | 219 | ... except ValueError as inst: print("oops") |
|
221 | 220 | oops |
|
222 | 221 | >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z') |
|
223 | 222 | ['# HG changeset patch', '# Date z'] |
|
224 | 223 | >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z') |
|
225 | 224 | ['# HG changeset patch', '# Date z', ''] |
|
226 | 225 | >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z') |
|
227 | 226 | ['# HG changeset patch', '# User y', '# Date z'] |
|
228 | 227 | >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'], |
|
229 | 228 | ... b'# User ', b'z') |
|
230 | 229 | ['# HG changeset patch', '# Date x', '# User z'] |
|
231 | 230 | >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z') |
|
232 | 231 | ['# HG changeset patch', '# Date z'] |
|
233 | 232 | >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'], |
|
234 | 233 | ... b'# Date ', b'z') |
|
235 | 234 | ['# HG changeset patch', '# Date z', '', '# Date y'] |
|
236 | 235 | >>> inserthgheader([b'# HG changeset patch', b'# Parent y'], |
|
237 | 236 | ... b'# Date ', b'z') |
|
238 | 237 | ['# HG changeset patch', '# Date z', '# Parent y'] |
|
239 | 238 | """ |
|
240 | 239 | start = lines.index(b'# HG changeset patch') + 1 |
|
241 | 240 | newindex = HGHEADERS.index(header) |
|
242 | 241 | bestpos = len(lines) |
|
243 | 242 | for i in range(start, len(lines)): |
|
244 | 243 | line = lines[i] |
|
245 | 244 | if not line.startswith(b'# '): |
|
246 | 245 | bestpos = min(bestpos, i) |
|
247 | 246 | break |
|
248 | 247 | for lineindex, h in enumerate(HGHEADERS): |
|
249 | 248 | if line.startswith(h): |
|
250 | 249 | if lineindex == newindex: |
|
251 | 250 | lines[i] = header + value |
|
252 | 251 | return lines |
|
253 | 252 | if lineindex > newindex: |
|
254 | 253 | bestpos = min(bestpos, i) |
|
255 | 254 | break # next line |
|
256 | 255 | lines.insert(bestpos, header + value) |
|
257 | 256 | return lines |
|
258 | 257 | |
|
259 | 258 | |
|
260 | 259 | def insertplainheader(lines, header, value): |
|
261 | 260 | """For lines containing a plain patch header, add a header line with value. |
|
262 | 261 | >>> insertplainheader([], b'Date', b'z') |
|
263 | 262 | ['Date: z'] |
|
264 | 263 | >>> insertplainheader([b''], b'Date', b'z') |
|
265 | 264 | ['Date: z', ''] |
|
266 | 265 | >>> insertplainheader([b'x'], b'Date', b'z') |
|
267 | 266 | ['Date: z', '', 'x'] |
|
268 | 267 | >>> insertplainheader([b'From: y', b'x'], b'Date', b'z') |
|
269 | 268 | ['From: y', 'Date: z', '', 'x'] |
|
270 | 269 | >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z') |
|
271 | 270 | [' date : x', 'From: z', ''] |
|
272 | 271 | >>> insertplainheader([b'', b'Date: y'], b'Date', b'z') |
|
273 | 272 | ['Date: z', '', 'Date: y'] |
|
274 | 273 | >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y') |
|
275 | 274 | ['From: y', 'foo: bar', 'DATE: z', '', 'x'] |
|
276 | 275 | """ |
|
277 | 276 | newprio = PLAINHEADERS[header.lower()] |
|
278 | 277 | bestpos = len(lines) |
|
279 | 278 | for i, line in enumerate(lines): |
|
280 | 279 | if b':' in line: |
|
281 | 280 | lheader = line.split(b':', 1)[0].strip().lower() |
|
282 | 281 | lprio = PLAINHEADERS.get(lheader, newprio + 1) |
|
283 | 282 | if lprio == newprio: |
|
284 | 283 | lines[i] = b'%s: %s' % (header, value) |
|
285 | 284 | return lines |
|
286 | 285 | if lprio > newprio and i < bestpos: |
|
287 | 286 | bestpos = i |
|
288 | 287 | else: |
|
289 | 288 | if line: |
|
290 | 289 | lines.insert(i, b'') |
|
291 | 290 | if i < bestpos: |
|
292 | 291 | bestpos = i |
|
293 | 292 | break |
|
294 | 293 | lines.insert(bestpos, b'%s: %s' % (header, value)) |
|
295 | 294 | return lines |
|
296 | 295 | |
|
297 | 296 | |
|
298 | 297 | class patchheader(object): |
|
299 | 298 | def __init__(self, pf, plainmode=False): |
|
300 | 299 | def eatdiff(lines): |
|
301 | 300 | while lines: |
|
302 | 301 | l = lines[-1] |
|
303 | 302 | if ( |
|
304 | 303 | l.startswith(b"diff -") |
|
305 | 304 | or l.startswith(b"Index:") |
|
306 | 305 | or l.startswith(b"===========") |
|
307 | 306 | ): |
|
308 | 307 | del lines[-1] |
|
309 | 308 | else: |
|
310 | 309 | break |
|
311 | 310 | |
|
312 | 311 | def eatempty(lines): |
|
313 | 312 | while lines: |
|
314 | 313 | if not lines[-1].strip(): |
|
315 | 314 | del lines[-1] |
|
316 | 315 | else: |
|
317 | 316 | break |
|
318 | 317 | |
|
319 | 318 | message = [] |
|
320 | 319 | comments = [] |
|
321 | 320 | user = None |
|
322 | 321 | date = None |
|
323 | 322 | parent = None |
|
324 | 323 | format = None |
|
325 | 324 | subject = None |
|
326 | 325 | branch = None |
|
327 | 326 | nodeid = None |
|
328 | 327 | diffstart = 0 |
|
329 | 328 | |
|
330 | 329 | for line in open(pf, b'rb'): |
|
331 | 330 | line = line.rstrip() |
|
332 | 331 | if line.startswith(b'diff --git') or ( |
|
333 | 332 | diffstart and line.startswith(b'+++ ') |
|
334 | 333 | ): |
|
335 | 334 | diffstart = 2 |
|
336 | 335 | break |
|
337 | 336 | diffstart = 0 # reset |
|
338 | 337 | if line.startswith(b"--- "): |
|
339 | 338 | diffstart = 1 |
|
340 | 339 | continue |
|
341 | 340 | elif format == b"hgpatch": |
|
342 | 341 | # parse values when importing the result of an hg export |
|
343 | 342 | if line.startswith(b"# User "): |
|
344 | 343 | user = line[7:] |
|
345 | 344 | elif line.startswith(b"# Date "): |
|
346 | 345 | date = line[7:] |
|
347 | 346 | elif line.startswith(b"# Parent "): |
|
348 | 347 | parent = line[9:].lstrip() # handle double trailing space |
|
349 | 348 | elif line.startswith(b"# Branch "): |
|
350 | 349 | branch = line[9:] |
|
351 | 350 | elif line.startswith(b"# Node ID "): |
|
352 | 351 | nodeid = line[10:] |
|
353 | 352 | elif not line.startswith(b"# ") and line: |
|
354 | 353 | message.append(line) |
|
355 | 354 | format = None |
|
356 | 355 | elif line == b'# HG changeset patch': |
|
357 | 356 | message = [] |
|
358 | 357 | format = b"hgpatch" |
|
359 | 358 | elif format != b"tagdone" and ( |
|
360 | 359 | line.startswith(b"Subject: ") or line.startswith(b"subject: ") |
|
361 | 360 | ): |
|
362 | 361 | subject = line[9:] |
|
363 | 362 | format = b"tag" |
|
364 | 363 | elif format != b"tagdone" and ( |
|
365 | 364 | line.startswith(b"From: ") or line.startswith(b"from: ") |
|
366 | 365 | ): |
|
367 | 366 | user = line[6:] |
|
368 | 367 | format = b"tag" |
|
369 | 368 | elif format != b"tagdone" and ( |
|
370 | 369 | line.startswith(b"Date: ") or line.startswith(b"date: ") |
|
371 | 370 | ): |
|
372 | 371 | date = line[6:] |
|
373 | 372 | format = b"tag" |
|
374 | 373 | elif format == b"tag" and line == b"": |
|
375 | 374 | # when looking for tags (subject: from: etc) they |
|
376 | 375 | # end once you find a blank line in the source |
|
377 | 376 | format = b"tagdone" |
|
378 | 377 | elif message or line: |
|
379 | 378 | message.append(line) |
|
380 | 379 | comments.append(line) |
|
381 | 380 | |
|
382 | 381 | eatdiff(message) |
|
383 | 382 | eatdiff(comments) |
|
384 | 383 | # Remember the exact starting line of the patch diffs before consuming |
|
385 | 384 | # empty lines, for external use by TortoiseHg and others |
|
386 | 385 | self.diffstartline = len(comments) |
|
387 | 386 | eatempty(message) |
|
388 | 387 | eatempty(comments) |
|
389 | 388 | |
|
390 | 389 | # make sure message isn't empty |
|
391 | 390 | if format and format.startswith(b"tag") and subject: |
|
392 | 391 | message.insert(0, subject) |
|
393 | 392 | |
|
394 | 393 | self.message = message |
|
395 | 394 | self.comments = comments |
|
396 | 395 | self.user = user |
|
397 | 396 | self.date = date |
|
398 | 397 | self.parent = parent |
|
399 | 398 | # nodeid and branch are for external use by TortoiseHg and others |
|
400 | 399 | self.nodeid = nodeid |
|
401 | 400 | self.branch = branch |
|
402 | 401 | self.haspatch = diffstart > 1 |
|
403 | 402 | self.plainmode = ( |
|
404 | 403 | plainmode |
|
405 | 404 | or b'# HG changeset patch' not in self.comments |
|
406 | 405 | and any( |
|
407 | 406 | c.startswith(b'Date: ') or c.startswith(b'From: ') |
|
408 | 407 | for c in self.comments |
|
409 | 408 | ) |
|
410 | 409 | ) |
|
411 | 410 | |
|
412 | 411 | def setuser(self, user): |
|
413 | 412 | try: |
|
414 | 413 | inserthgheader(self.comments, b'# User ', user) |
|
415 | 414 | except ValueError: |
|
416 | 415 | if self.plainmode: |
|
417 | 416 | insertplainheader(self.comments, b'From', user) |
|
418 | 417 | else: |
|
419 | 418 | tmp = [b'# HG changeset patch', b'# User ' + user] |
|
420 | 419 | self.comments = tmp + self.comments |
|
421 | 420 | self.user = user |
|
422 | 421 | |
|
423 | 422 | def setdate(self, date): |
|
424 | 423 | try: |
|
425 | 424 | inserthgheader(self.comments, b'# Date ', date) |
|
426 | 425 | except ValueError: |
|
427 | 426 | if self.plainmode: |
|
428 | 427 | insertplainheader(self.comments, b'Date', date) |
|
429 | 428 | else: |
|
430 | 429 | tmp = [b'# HG changeset patch', b'# Date ' + date] |
|
431 | 430 | self.comments = tmp + self.comments |
|
432 | 431 | self.date = date |
|
433 | 432 | |
|
434 | 433 | def setparent(self, parent): |
|
435 | 434 | try: |
|
436 | 435 | inserthgheader(self.comments, b'# Parent ', parent) |
|
437 | 436 | except ValueError: |
|
438 | 437 | if not self.plainmode: |
|
439 | 438 | tmp = [b'# HG changeset patch', b'# Parent ' + parent] |
|
440 | 439 | self.comments = tmp + self.comments |
|
441 | 440 | self.parent = parent |
|
442 | 441 | |
|
443 | 442 | def setmessage(self, message): |
|
444 | 443 | if self.comments: |
|
445 | 444 | self._delmsg() |
|
446 | 445 | self.message = [message] |
|
447 | 446 | if message: |
|
448 | 447 | if self.plainmode and self.comments and self.comments[-1]: |
|
449 | 448 | self.comments.append(b'') |
|
450 | 449 | self.comments.append(message) |
|
451 | 450 | |
|
452 | 451 | def __bytes__(self): |
|
453 | 452 | s = b'\n'.join(self.comments).rstrip() |
|
454 | 453 | if not s: |
|
455 | 454 | return b'' |
|
456 | 455 | return s + b'\n\n' |
|
457 | 456 | |
|
458 | 457 | __str__ = encoding.strmethod(__bytes__) |
|
459 | 458 | |
|
460 | 459 | def _delmsg(self): |
|
461 | 460 | """Remove existing message, keeping the rest of the comments fields. |
|
462 | 461 | If comments contains 'subject: ', message will prepend |
|
463 | 462 | the field and a blank line.""" |
|
464 | 463 | if self.message: |
|
465 | 464 | subj = b'subject: ' + self.message[0].lower() |
|
466 | 465 | for i in pycompat.xrange(len(self.comments)): |
|
467 | 466 | if subj == self.comments[i].lower(): |
|
468 | 467 | del self.comments[i] |
|
469 | 468 | self.message = self.message[2:] |
|
470 | 469 | break |
|
471 | 470 | ci = 0 |
|
472 | 471 | for mi in self.message: |
|
473 | 472 | while mi != self.comments[ci]: |
|
474 | 473 | ci += 1 |
|
475 | 474 | del self.comments[ci] |
|
476 | 475 | |
|
477 | 476 | |
|
478 | 477 | def newcommit(repo, phase, *args, **kwargs): |
|
479 | 478 | """helper dedicated to ensure a commit respect mq.secret setting |
|
480 | 479 | |
|
481 | 480 | It should be used instead of repo.commit inside the mq source for operation |
|
482 | 481 | creating new changeset. |
|
483 | 482 | """ |
|
484 | 483 | repo = repo.unfiltered() |
|
485 | 484 | if phase is None: |
|
486 | 485 | if repo.ui.configbool(b'mq', b'secret'): |
|
487 | 486 | phase = phases.secret |
|
488 | 487 | overrides = {(b'ui', b'allowemptycommit'): True} |
|
489 | 488 | if phase is not None: |
|
490 | 489 | overrides[(b'phases', b'new-commit')] = phase |
|
491 | 490 | with repo.ui.configoverride(overrides, b'mq'): |
|
492 | 491 | repo.ui.setconfig(b'ui', b'allowemptycommit', True) |
|
493 | 492 | return repo.commit(*args, **kwargs) |
|
494 | 493 | |
|
495 | 494 | |
|
496 | 495 | class AbortNoCleanup(error.Abort): |
|
497 | 496 | pass |
|
498 | 497 | |
|
499 | 498 | |
|
500 | 499 | class queue(object): |
|
501 | 500 | def __init__(self, ui, baseui, path, patchdir=None): |
|
502 | 501 | self.basepath = path |
|
503 | 502 | try: |
|
504 | 503 | with open(os.path.join(path, b'patches.queue'), 'rb') as fh: |
|
505 | 504 | cur = fh.read().rstrip() |
|
506 | 505 | |
|
507 | 506 | if not cur: |
|
508 | 507 | curpath = os.path.join(path, b'patches') |
|
509 | 508 | else: |
|
510 | 509 | curpath = os.path.join(path, b'patches-' + cur) |
|
511 | 510 | except IOError: |
|
512 | 511 | curpath = os.path.join(path, b'patches') |
|
513 | 512 | self.path = patchdir or curpath |
|
514 | 513 | self.opener = vfsmod.vfs(self.path) |
|
515 | 514 | self.ui = ui |
|
516 | 515 | self.baseui = baseui |
|
517 | 516 | self.applieddirty = False |
|
518 | 517 | self.seriesdirty = False |
|
519 | 518 | self.added = [] |
|
520 | 519 | self.seriespath = b"series" |
|
521 | 520 | self.statuspath = b"status" |
|
522 | 521 | self.guardspath = b"guards" |
|
523 | 522 | self.activeguards = None |
|
524 | 523 | self.guardsdirty = False |
|
525 | 524 | # Handle mq.git as a bool with extended values |
|
526 | 525 | gitmode = ui.config(b'mq', b'git').lower() |
|
527 | 526 | boolmode = stringutil.parsebool(gitmode) |
|
528 | 527 | if boolmode is not None: |
|
529 | 528 | if boolmode: |
|
530 | 529 | gitmode = b'yes' |
|
531 | 530 | else: |
|
532 | 531 | gitmode = b'no' |
|
533 | 532 | self.gitmode = gitmode |
|
534 | 533 | # deprecated config: mq.plain |
|
535 | 534 | self.plainmode = ui.configbool(b'mq', b'plain') |
|
536 | 535 | self.checkapplied = True |
|
537 | 536 | |
|
538 | 537 | @util.propertycache |
|
539 | 538 | def applied(self): |
|
540 | 539 | def parselines(lines): |
|
541 | 540 | for l in lines: |
|
542 | 541 | entry = l.split(b':', 1) |
|
543 | 542 | if len(entry) > 1: |
|
544 | 543 | n, name = entry |
|
545 | 544 | yield statusentry(bin(n), name) |
|
546 | 545 | elif l.strip(): |
|
547 | 546 | self.ui.warn( |
|
548 | 547 | _(b'malformated mq status line: %s\n') |
|
549 | 548 | % stringutil.pprint(entry) |
|
550 | 549 | ) |
|
551 | 550 | # else we ignore empty lines |
|
552 | 551 | |
|
553 | 552 | try: |
|
554 | 553 | lines = self.opener.read(self.statuspath).splitlines() |
|
555 | 554 | return list(parselines(lines)) |
|
556 | 555 | except IOError as e: |
|
557 | 556 | if e.errno == errno.ENOENT: |
|
558 | 557 | return [] |
|
559 | 558 | raise |
|
560 | 559 | |
|
561 | 560 | @util.propertycache |
|
562 | 561 | def fullseries(self): |
|
563 | 562 | try: |
|
564 | 563 | return self.opener.read(self.seriespath).splitlines() |
|
565 | 564 | except IOError as e: |
|
566 | 565 | if e.errno == errno.ENOENT: |
|
567 | 566 | return [] |
|
568 | 567 | raise |
|
569 | 568 | |
|
570 | 569 | @util.propertycache |
|
571 | 570 | def series(self): |
|
572 | 571 | self.parseseries() |
|
573 | 572 | return self.series |
|
574 | 573 | |
|
575 | 574 | @util.propertycache |
|
576 | 575 | def seriesguards(self): |
|
577 | 576 | self.parseseries() |
|
578 | 577 | return self.seriesguards |
|
579 | 578 | |
|
580 | 579 | def invalidate(self): |
|
581 | 580 | for a in 'applied fullseries series seriesguards'.split(): |
|
582 | 581 | if a in self.__dict__: |
|
583 | 582 | delattr(self, a) |
|
584 | 583 | self.applieddirty = False |
|
585 | 584 | self.seriesdirty = False |
|
586 | 585 | self.guardsdirty = False |
|
587 | 586 | self.activeguards = None |
|
588 | 587 | |
|
589 | 588 | def diffopts(self, opts=None, patchfn=None, plain=False): |
|
590 | 589 | """Return diff options tweaked for this mq use, possibly upgrading to |
|
591 | 590 | git format, and possibly plain and without lossy options.""" |
|
592 | 591 | diffopts = patchmod.difffeatureopts( |
|
593 | 592 | self.ui, |
|
594 | 593 | opts, |
|
595 | 594 | git=True, |
|
596 | 595 | whitespace=not plain, |
|
597 | 596 | formatchanging=not plain, |
|
598 | 597 | ) |
|
599 | 598 | if self.gitmode == b'auto': |
|
600 | 599 | diffopts.upgrade = True |
|
601 | 600 | elif self.gitmode == b'keep': |
|
602 | 601 | pass |
|
603 | 602 | elif self.gitmode in (b'yes', b'no'): |
|
604 | 603 | diffopts.git = self.gitmode == b'yes' |
|
605 | 604 | else: |
|
606 | 605 | raise error.Abort( |
|
607 | 606 | _(b'mq.git option can be auto/keep/yes/no, got %s')
|
608 | 607 | % self.gitmode |
|
609 | 608 | ) |
|
610 | 609 | if patchfn: |
|
611 | 610 | diffopts = self.patchopts(diffopts, patchfn) |
|
612 | 611 | return diffopts |
|
613 | 612 | |
|
614 | 613 | def patchopts(self, diffopts, *patches): |
|
615 | 614 | """Return a copy of input diff options with git set to true if |
|
616 | 615 | referenced patch is a git patch and should be preserved as such. |
|
617 | 616 | """ |
|
618 | 617 | diffopts = diffopts.copy() |
|
619 | 618 | if not diffopts.git and self.gitmode == b'keep': |
|
620 | 619 | for patchfn in patches: |
|
621 | 620 | patchf = self.opener(patchfn, b'r') |
|
622 | 621 | # if the patch was a git patch, refresh it as a git patch |
|
623 | 622 | diffopts.git = any( |
|
624 | 623 | line.startswith(b'diff --git') for line in patchf |
|
625 | 624 | ) |
|
626 | 625 | patchf.close() |
|
627 | 626 | return diffopts |
|
628 | 627 | |
|
629 | 628 | def join(self, *p): |
|
630 | 629 | return os.path.join(self.path, *p) |
|
631 | 630 | |
|
632 | 631 | def findseries(self, patch): |
|
633 | 632 | def matchpatch(l): |
|
634 | 633 | l = l.split(b'#', 1)[0] |
|
635 | 634 | return l.strip() == patch |
|
636 | 635 | |
|
637 | 636 | for index, l in enumerate(self.fullseries): |
|
638 | 637 | if matchpatch(l): |
|
639 | 638 | return index |
|
640 | 639 | return None |
|
641 | 640 | |
|
642 | 641 | guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)') |
|
643 | 642 | |
|
644 | 643 | def parseseries(self): |
|
645 | 644 | self.series = [] |
|
646 | 645 | self.seriesguards = [] |
|
647 | 646 | for l in self.fullseries: |
|
648 | 647 | h = l.find(b'#') |
|
649 | 648 | if h == -1: |
|
650 | 649 | patch = l |
|
651 | 650 | comment = b'' |
|
652 | 651 | elif h == 0: |
|
653 | 652 | continue |
|
654 | 653 | else: |
|
655 | 654 | patch = l[:h] |
|
656 | 655 | comment = l[h:] |
|
657 | 656 | patch = patch.strip() |
|
658 | 657 | if patch: |
|
659 | 658 | if patch in self.series: |
|
660 | 659 | raise error.Abort( |
|
661 | 660 | _(b'%s appears more than once in %s') |
|
662 | 661 | % (patch, self.join(self.seriespath)) |
|
663 | 662 | ) |
|
664 | 663 | self.series.append(patch) |
|
665 | 664 | self.seriesguards.append(self.guard_re.findall(comment)) |
|
666 | 665 | |
|
667 | 666 | def checkguard(self, guard): |
|
668 | 667 | if not guard: |
|
669 | 668 | return _(b'guard cannot be an empty string') |
|
670 | 669 | bad_chars = b'# \t\r\n\f' |
|
671 | 670 | first = guard[0] |
|
672 | 671 | if first in b'-+': |
|
673 | 672 | return _(b'guard %r starts with invalid character: %r') % ( |
|
674 | 673 | guard, |
|
675 | 674 | first, |
|
676 | 675 | ) |
|
677 | 676 | for c in bad_chars: |
|
678 | 677 | if c in guard: |
|
679 | 678 | return _(b'invalid character in guard %r: %r') % (guard, c) |
|
680 | 679 | |
|
681 | 680 | def setactive(self, guards): |
|
682 | 681 | for guard in guards: |
|
683 | 682 | bad = self.checkguard(guard) |
|
684 | 683 | if bad: |
|
685 | 684 | raise error.Abort(bad) |
|
686 | 685 | guards = sorted(set(guards)) |
|
687 | 686 | self.ui.debug(b'active guards: %s\n' % b' '.join(guards)) |
|
688 | 687 | self.activeguards = guards |
|
689 | 688 | self.guardsdirty = True |
|
690 | 689 | |
|
691 | 690 | def active(self): |
|
692 | 691 | if self.activeguards is None: |
|
693 | 692 | self.activeguards = [] |
|
694 | 693 | try: |
|
695 | 694 | guards = self.opener.read(self.guardspath).split() |
|
696 | 695 | except IOError as err: |
|
697 | 696 | if err.errno != errno.ENOENT: |
|
698 | 697 | raise |
|
699 | 698 | guards = [] |
|
700 | 699 | for i, guard in enumerate(guards): |
|
701 | 700 | bad = self.checkguard(guard) |
|
702 | 701 | if bad: |
|
703 | 702 | self.ui.warn( |
|
704 | 703 | b'%s:%d: %s\n' |
|
705 | 704 | % (self.join(self.guardspath), i + 1, bad) |
|
706 | 705 | ) |
|
707 | 706 | else: |
|
708 | 707 | self.activeguards.append(guard) |
|
709 | 708 | return self.activeguards |
|
710 | 709 | |
|
711 | 710 | def setguards(self, idx, guards): |
|
712 | 711 | for g in guards: |
|
713 | 712 | if len(g) < 2: |
|
714 | 713 | raise error.Abort(_(b'guard %r too short') % g) |
|
715 | 714 | if g[0] not in b'-+': |
|
716 | 715 | raise error.Abort(_(b'guard %r starts with invalid char') % g) |
|
717 | 716 | bad = self.checkguard(g[1:]) |
|
718 | 717 | if bad: |
|
719 | 718 | raise error.Abort(bad) |
|
720 | 719 | drop = self.guard_re.sub(b'', self.fullseries[idx]) |
|
721 | 720 | self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards]) |
|
722 | 721 | self.parseseries() |
|
723 | 722 | self.seriesdirty = True |
|
724 | 723 | |
|
725 | 724 | def pushable(self, idx): |
|
726 | 725 | if isinstance(idx, bytes): |
|
727 | 726 | idx = self.series.index(idx) |
|
728 | 727 | patchguards = self.seriesguards[idx] |
|
729 | 728 | if not patchguards: |
|
730 | 729 | return True, None |
|
731 | 730 | guards = self.active() |
|
732 | 731 | exactneg = [ |
|
733 | 732 | g for g in patchguards if g.startswith(b'-') and g[1:] in guards |
|
734 | 733 | ] |
|
735 | 734 | if exactneg: |
|
736 | 735 | return False, stringutil.pprint(exactneg[0]) |
|
737 | 736 | pos = [g for g in patchguards if g.startswith(b'+')] |
|
738 | 737 | exactpos = [g for g in pos if g[1:] in guards] |
|
739 | 738 | if pos: |
|
740 | 739 | if exactpos: |
|
741 | 740 | return True, stringutil.pprint(exactpos[0]) |
|
742 | 741 | return False, b' '.join([stringutil.pprint(p) for p in pos]) |
|
743 | 742 | return True, b'' |
|
744 | 743 | |
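pushable() above implements the guard rules: any matching negative guard
blocks a patch, and if positive guards exist, at least one must match an
active guard. A pure-Python restatement with made-up guard names::

    patchguards = [b'+feature', b'-broken']  # guards on one series entry
    active = [b'feature']                    # currently selected guards
    exactneg = [g for g in patchguards
                if g.startswith(b'-') and g[1:] in active]
    pos = [g for g in patchguards if g.startswith(b'+')]
    pushable = not exactneg and (not pos or any(g[1:] in active for g in pos))
    assert pushable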
|
745 | 744 | def explainpushable(self, idx, all_patches=False): |
|
746 | 745 | if all_patches: |
|
747 | 746 | write = self.ui.write |
|
748 | 747 | else: |
|
749 | 748 | write = self.ui.warn |
|
750 | 749 | |
|
751 | 750 | if all_patches or self.ui.verbose: |
|
752 | 751 | if isinstance(idx, bytes): |
|
753 | 752 | idx = self.series.index(idx) |
|
754 | 753 | pushable, why = self.pushable(idx) |
|
755 | 754 | if all_patches and pushable: |
|
756 | 755 | if why is None: |
|
757 | 756 | write( |
|
758 | 757 | _(b'allowing %s - no guards in effect\n') |
|
759 | 758 | % self.series[idx] |
|
760 | 759 | ) |
|
761 | 760 | else: |
|
762 | 761 | if not why: |
|
763 | 762 | write( |
|
764 | 763 | _(b'allowing %s - no matching negative guards\n') |
|
765 | 764 | % self.series[idx] |
|
766 | 765 | ) |
|
767 | 766 | else: |
|
768 | 767 | write( |
|
769 | 768 | _(b'allowing %s - guarded by %s\n') |
|
770 | 769 | % (self.series[idx], why) |
|
771 | 770 | ) |
|
772 | 771 | if not pushable: |
|
773 | 772 | if why: |
|
774 | 773 | write( |
|
775 | 774 | _(b'skipping %s - guarded by %s\n') |
|
776 | 775 | % (self.series[idx], why) |
|
777 | 776 | ) |
|
778 | 777 | else: |
|
779 | 778 | write( |
|
780 | 779 | _(b'skipping %s - no matching guards\n') |
|
781 | 780 | % self.series[idx] |
|
782 | 781 | ) |
|
783 | 782 | |
|
784 | 783 | def savedirty(self): |
|
785 | 784 | def writelist(items, path): |
|
786 | 785 | fp = self.opener(path, b'wb') |
|
787 | 786 | for i in items: |
|
788 | 787 | fp.write(b"%s\n" % i) |
|
789 | 788 | fp.close() |
|
790 | 789 | |
|
791 | 790 | if self.applieddirty: |
|
792 | 791 | writelist(map(bytes, self.applied), self.statuspath) |
|
793 | 792 | self.applieddirty = False |
|
794 | 793 | if self.seriesdirty: |
|
795 | 794 | writelist(self.fullseries, self.seriespath) |
|
796 | 795 | self.seriesdirty = False |
|
797 | 796 | if self.guardsdirty: |
|
798 | 797 | writelist(self.activeguards, self.guardspath) |
|
799 | 798 | self.guardsdirty = False |
|
800 | 799 | if self.added: |
|
801 | 800 | qrepo = self.qrepo() |
|
802 | 801 | if qrepo: |
|
803 | 802 | qrepo[None].add(f for f in self.added if f not in qrepo[None]) |
|
804 | 803 | self.added = [] |
|
805 | 804 | |
|
806 | 805 | def removeundo(self, repo): |
|
807 | 806 | undo = repo.sjoin(b'undo') |
|
808 | 807 | if not os.path.exists(undo): |
|
809 | 808 | return |
|
810 | 809 | try: |
|
811 | 810 | os.unlink(undo) |
|
812 | 811 | except OSError as inst: |
|
813 | 812 | self.ui.warn( |
|
814 | 813 | _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst) |
|
815 | 814 | ) |
|
816 | 815 | |
|
817 | 816 | def backup(self, repo, files, copy=False): |
|
818 | 817 | # backup local changes in --force case |
|
819 | 818 | for f in sorted(files): |
|
820 | 819 | absf = repo.wjoin(f) |
|
821 | 820 | if os.path.lexists(absf): |
|
822 | 821 | absorig = scmutil.backuppath(self.ui, repo, f) |
|
823 | 822 | self.ui.note( |
|
824 | 823 | _(b'saving current version of %s as %s\n') |
|
825 | 824 | % (f, os.path.relpath(absorig)) |
|
826 | 825 | ) |
|
827 | 826 | |
|
828 | 827 | if copy: |
|
829 | 828 | util.copyfile(absf, absorig) |
|
830 | 829 | else: |
|
831 | 830 | util.rename(absf, absorig) |
|
832 | 831 | |
|
833 | 832 | def printdiff( |
|
834 | 833 | self, |
|
835 | 834 | repo, |
|
836 | 835 | diffopts, |
|
837 | 836 | node1, |
|
838 | 837 | node2=None, |
|
839 | 838 | files=None, |
|
840 | 839 | fp=None, |
|
841 | 840 | changes=None, |
|
842 | 841 | opts=None, |
|
843 | 842 | ): |
|
844 | 843 | if opts is None: |
|
845 | 844 | opts = {} |
|
846 | 845 | stat = opts.get(b'stat') |
|
847 | 846 | m = scmutil.match(repo[node1], files, opts) |
|
848 | 847 | logcmdutil.diffordiffstat( |
|
849 | 848 | self.ui, |
|
850 | 849 | repo, |
|
851 | 850 | diffopts, |
|
852 | 851 | repo[node1], |
|
853 | 852 | repo[node2], |
|
854 | 853 | m, |
|
855 | 854 | changes, |
|
856 | 855 | stat, |
|
857 | 856 | fp, |
|
858 | 857 | ) |
|
859 | 858 | |
|
860 | 859 | def mergeone(self, repo, mergeq, head, patch, rev, diffopts): |
|
861 | 860 | # first try just applying the patch |
|
862 | 861 | (err, n) = self.apply( |
|
863 | 862 | repo, [patch], update_status=False, strict=True, merge=rev |
|
864 | 863 | ) |
|
865 | 864 | |
|
866 | 865 | if err == 0: |
|
867 | 866 | return (err, n) |
|
868 | 867 | |
|
869 | 868 | if n is None: |
|
870 | 869 | raise error.Abort(_(b"apply failed for patch %s") % patch) |
|
871 | 870 | |
|
872 | 871 | self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch) |
|
873 | 872 | |
|
874 | 873 | # apply failed, strip away that rev and merge. |
|
875 | 874 | hg.clean(repo, head) |
|
876 | 875 | strip(self.ui, repo, [n], update=False, backup=False) |
|
877 | 876 | |
|
878 | 877 | ctx = repo[rev] |
|
879 | 878 | ret = hg.merge(ctx, remind=False) |
|
880 | 879 | if ret: |
|
881 | 880 | raise error.Abort(_(b"update returned %d") % ret) |
|
882 | 881 | n = newcommit(repo, None, ctx.description(), ctx.user(), force=True) |
|
883 | 882 | if n is None: |
|
884 | 883 | raise error.Abort(_(b"repo commit failed")) |
|
885 | 884 | try: |
|
886 | 885 | ph = patchheader(mergeq.join(patch), self.plainmode) |
|
887 | 886 | except Exception: |
|
888 | 887 | raise error.Abort(_(b"unable to read %s") % patch) |
|
889 | 888 | |
|
890 | 889 | diffopts = self.patchopts(diffopts, patch) |
|
891 | 890 | patchf = self.opener(patch, b"w") |
|
892 | 891 | comments = bytes(ph) |
|
893 | 892 | if comments: |
|
894 | 893 | patchf.write(comments) |
|
895 | 894 | self.printdiff(repo, diffopts, head, n, fp=patchf) |
|
896 | 895 | patchf.close() |
|
897 | 896 | self.removeundo(repo) |
|
898 | 897 | return (0, n) |
|
899 | 898 | |
|
900 | 899 | def qparents(self, repo, rev=None): |
|
901 | 900 | """return the mq handled parent or p1 |
|
902 | 901 | |
|
903 | 902 | In some case where mq get himself in being the parent of a merge the |
|
904 | 903 | appropriate parent may be p2. |
|
905 | 904 | (eg: an in progress merge started with mq disabled) |
|
906 | 905 | |
|
907 | 906 | If no parent are managed by mq, p1 is returned. |
|
908 | 907 | """ |
|
909 | 908 | if rev is None: |
|
910 | 909 | (p1, p2) = repo.dirstate.parents() |
|
911 | if p2 == nullid: | |
|
910 | if p2 == repo.nullid: | |
|
912 | 911 | return p1 |
|
913 | 912 | if not self.applied: |
|
914 | 913 | return None |
|
915 | 914 | return self.applied[-1].node |
|
916 | 915 | p1, p2 = repo.changelog.parents(rev) |
|
917 | if p2 != nullid and p2 in [x.node for x in self.applied]: | |
|
916 | if p2 != repo.nullid and p2 in [x.node for x in self.applied]: | |
|
918 | 917 | return p2 |
|
919 | 918 | return p1 |
|
920 | 919 | |
|
921 | 920 | def mergepatch(self, repo, mergeq, series, diffopts): |
|
922 | 921 | if not self.applied: |
|
923 | 922 | # each of the patches merged in will have two parents. This |
|
924 | 923 | # can confuse the qrefresh, qdiff, and strip code because it |
|
925 | 924 | # needs to know which parent is actually in the patch queue. |
|
926 | 925 | # so, we insert a merge marker with only one parent. This way |
|
927 | 926 | # the first patch in the queue is never a merge patch |
|
928 | 927 | # |
|
929 | 928 | pname = b".hg.patches.merge.marker" |
|
930 | 929 | n = newcommit(repo, None, b'[mq]: merge marker', force=True) |
|
931 | 930 | self.removeundo(repo) |
|
932 | 931 | self.applied.append(statusentry(n, pname)) |
|
933 | 932 | self.applieddirty = True |
|
934 | 933 | |
|
935 | 934 | head = self.qparents(repo) |
|
936 | 935 | |
|
937 | 936 | for patch in series: |
|
938 | 937 | patch = mergeq.lookup(patch, strict=True) |
|
939 | 938 | if not patch: |
|
940 | 939 | self.ui.warn(_(b"patch %s does not exist\n") % patch) |
|
941 | 940 | return (1, None) |
|
942 | 941 | pushable, reason = self.pushable(patch) |
|
943 | 942 | if not pushable: |
|
944 | 943 | self.explainpushable(patch, all_patches=True) |
|
945 | 944 | continue |
|
946 | 945 | info = mergeq.isapplied(patch) |
|
947 | 946 | if not info: |
|
948 | 947 | self.ui.warn(_(b"patch %s is not applied\n") % patch) |
|
949 | 948 | return (1, None) |
|
950 | 949 | rev = info[1] |
|
951 | 950 | err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts) |
|
952 | 951 | if head: |
|
953 | 952 | self.applied.append(statusentry(head, patch)) |
|
954 | 953 | self.applieddirty = True |
|
955 | 954 | if err: |
|
956 | 955 | return (err, head) |
|
957 | 956 | self.savedirty() |
|
958 | 957 | return (0, head) |
|
959 | 958 | |
|
960 | 959 | def patch(self, repo, patchfile): |
|
961 | 960 | """Apply patchfile to the working directory. |
|
962 | 961 | patchfile: name of patch file""" |
|
963 | 962 | files = set() |
|
964 | 963 | try: |
|
965 | 964 | fuzz = patchmod.patch( |
|
966 | 965 | self.ui, repo, patchfile, strip=1, files=files, eolmode=None |
|
967 | 966 | ) |
|
968 | 967 | return (True, list(files), fuzz) |
|
969 | 968 | except Exception as inst: |
|
970 | 969 | self.ui.note(stringutil.forcebytestr(inst) + b'\n') |
|
971 | 970 | if not self.ui.verbose: |
|
972 | 971 | self.ui.warn(_(b"patch failed, unable to continue (try -v)\n")) |
|
973 | 972 | self.ui.traceback() |
|
974 | 973 | return (False, list(files), False) |
|
975 | 974 | |
|
976 | 975 | def apply( |
|
977 | 976 | self, |
|
978 | 977 | repo, |
|
979 | 978 | series, |
|
980 | 979 | list=False, |
|
981 | 980 | update_status=True, |
|
982 | 981 | strict=False, |
|
983 | 982 | patchdir=None, |
|
984 | 983 | merge=None, |
|
985 | 984 | all_files=None, |
|
986 | 985 | tobackup=None, |
|
987 | 986 | keepchanges=False, |
|
988 | 987 | ): |
|
989 | 988 | wlock = lock = tr = None |
|
990 | 989 | try: |
|
991 | 990 | wlock = repo.wlock() |
|
992 | 991 | lock = repo.lock() |
|
993 | 992 | tr = repo.transaction(b"qpush") |
|
994 | 993 | try: |
|
995 | 994 | ret = self._apply( |
|
996 | 995 | repo, |
|
997 | 996 | series, |
|
998 | 997 | list, |
|
999 | 998 | update_status, |
|
1000 | 999 | strict, |
|
1001 | 1000 | patchdir, |
|
1002 | 1001 | merge, |
|
1003 | 1002 | all_files=all_files, |
|
1004 | 1003 | tobackup=tobackup, |
|
1005 | 1004 | keepchanges=keepchanges, |
|
1006 | 1005 | ) |
|
1007 | 1006 | tr.close() |
|
1008 | 1007 | self.savedirty() |
|
1009 | 1008 | return ret |
|
1010 | 1009 | except AbortNoCleanup: |
|
1011 | 1010 | tr.close() |
|
1012 | 1011 | self.savedirty() |
|
1013 | 1012 | raise |
|
1014 | 1013 | except: # re-raises |
|
1015 | 1014 | try: |
|
1016 | 1015 | tr.abort() |
|
1017 | 1016 | finally: |
|
1018 | 1017 | self.invalidate() |
|
1019 | 1018 | raise |
|
1020 | 1019 | finally: |
|
1021 | 1020 | release(tr, lock, wlock) |
|
1022 | 1021 | self.removeundo(repo) |
|
1023 | 1022 | |
|
1024 | 1023 | def _apply( |
|
1025 | 1024 | self, |
|
1026 | 1025 | repo, |
|
1027 | 1026 | series, |
|
1028 | 1027 | list=False, |
|
1029 | 1028 | update_status=True, |
|
1030 | 1029 | strict=False, |
|
1031 | 1030 | patchdir=None, |
|
1032 | 1031 | merge=None, |
|
1033 | 1032 | all_files=None, |
|
1034 | 1033 | tobackup=None, |
|
1035 | 1034 | keepchanges=False, |
|
1036 | 1035 | ): |
|
1037 | 1036 | """returns (error, hash) |
|
1038 | 1037 | |
|
1039 | 1038 | error = 1 for unable to read, 2 for patch failed, 3 for patch |
|
1040 | 1039 | fuzz. tobackup is None or a set of files to back up before they |
|
1041 | 1040 | are modified by a patch. |
|
1042 | 1041 | """ |
|
1043 | 1042 | # TODO unify with commands.py |
|
1044 | 1043 | if not patchdir: |
|
1045 | 1044 | patchdir = self.path |
|
1046 | 1045 | err = 0 |
|
1047 | 1046 | n = None |
|
1048 | 1047 | for patchname in series: |
|
1049 | 1048 | pushable, reason = self.pushable(patchname) |
|
1050 | 1049 | if not pushable: |
|
1051 | 1050 | self.explainpushable(patchname, all_patches=True) |
|
1052 | 1051 | continue |
|
1053 | 1052 | self.ui.status(_(b"applying %s\n") % patchname) |
|
1054 | 1053 | pf = os.path.join(patchdir, patchname) |
|
1055 | 1054 | |
|
1056 | 1055 | try: |
|
1057 | 1056 | ph = patchheader(self.join(patchname), self.plainmode) |
|
1058 | 1057 | except IOError: |
|
1059 | 1058 | self.ui.warn(_(b"unable to read %s\n") % patchname) |
|
1060 | 1059 | err = 1 |
|
1061 | 1060 | break |
|
1062 | 1061 | |
|
1063 | 1062 | message = ph.message |
|
1064 | 1063 | if not message: |
|
1065 | 1064 | # The commit message should not be translated |
|
1066 | 1065 | message = b"imported patch %s\n" % patchname |
|
1067 | 1066 | else: |
|
1068 | 1067 | if list: |
|
1069 | 1068 | # The commit message should not be translated |
|
1070 | 1069 | message.append(b"\nimported patch %s" % patchname) |
|
1071 | 1070 | message = b'\n'.join(message) |
|
1072 | 1071 | |
|
1073 | 1072 | if ph.haspatch: |
|
1074 | 1073 | if tobackup: |
|
1075 | 1074 | touched = patchmod.changedfiles(self.ui, repo, pf) |
|
1076 | 1075 | touched = set(touched) & tobackup |
|
1077 | 1076 | if touched and keepchanges: |
|
1078 | 1077 | raise AbortNoCleanup( |
|
1079 | 1078 | _(b"conflicting local changes found"), |
|
1080 | 1079 | hint=_(b"did you forget to qrefresh?"), |
|
1081 | 1080 | ) |
|
1082 | 1081 | self.backup(repo, touched, copy=True) |
|
1083 | 1082 | tobackup = tobackup - touched |
|
1084 | 1083 | (patcherr, files, fuzz) = self.patch(repo, pf) |
|
1085 | 1084 | if all_files is not None: |
|
1086 | 1085 | all_files.update(files) |
|
1087 | 1086 | patcherr = not patcherr |
|
1088 | 1087 | else: |
|
1089 | 1088 | self.ui.warn(_(b"patch %s is empty\n") % patchname) |
|
1090 | 1089 | patcherr, files, fuzz = 0, [], 0 |
|
1091 | 1090 | |
|
1092 | 1091 | if merge and files: |
|
1093 | 1092 | # Mark as removed/merged and update dirstate parent info |
|
1094 | 1093 | removed = [] |
|
1095 | 1094 | merged = [] |
|
1096 | 1095 | for f in files: |
|
1097 | 1096 | if os.path.lexists(repo.wjoin(f)): |
|
1098 | 1097 | merged.append(f) |
|
1099 | 1098 | else: |
|
1100 | 1099 | removed.append(f) |
|
1101 | 1100 | with repo.dirstate.parentchange(): |
|
1102 | 1101 | for f in removed: |
|
1103 | 1102 | repo.dirstate.remove(f) |
|
1104 | 1103 | for f in merged: |
|
1105 | 1104 | repo.dirstate.merge(f) |
|
1106 | 1105 | p1 = repo.dirstate.p1() |
|
1107 | 1106 | repo.setparents(p1, merge) |
|
1108 | 1107 | |
|
1109 | 1108 | if all_files and b'.hgsubstate' in all_files: |
|
1110 | 1109 | wctx = repo[None] |
|
1111 | 1110 | pctx = repo[b'.'] |
|
1112 | 1111 | overwrite = False |
|
1113 | 1112 | mergedsubstate = subrepoutil.submerge( |
|
1114 | 1113 | repo, pctx, wctx, wctx, overwrite |
|
1115 | 1114 | ) |
|
1116 | 1115 | files += mergedsubstate.keys() |
|
1117 | 1116 | |
|
1118 | 1117 | match = scmutil.matchfiles(repo, files or []) |
|
1119 | 1118 | oldtip = repo.changelog.tip() |
|
1120 | 1119 | n = newcommit( |
|
1121 | 1120 | repo, None, message, ph.user, ph.date, match=match, force=True |
|
1122 | 1121 | ) |
|
1123 | 1122 | if repo.changelog.tip() == oldtip: |
|
1124 | 1123 | raise error.Abort( |
|
1125 | 1124 | _(b"qpush exactly duplicates child changeset") |
|
1126 | 1125 | ) |
|
1127 | 1126 | if n is None: |
|
1128 | 1127 | raise error.Abort(_(b"repository commit failed")) |
|
1129 | 1128 | |
|
1130 | 1129 | if update_status: |
|
1131 | 1130 | self.applied.append(statusentry(n, patchname)) |
|
1132 | 1131 | |
|
1133 | 1132 | if patcherr: |
|
1134 | 1133 | self.ui.warn( |
|
1135 | 1134 | _(b"patch failed, rejects left in working directory\n") |
|
1136 | 1135 | ) |
|
1137 | 1136 | err = 2 |
|
1138 | 1137 | break |
|
1139 | 1138 | |
|
1140 | 1139 | if fuzz and strict: |
|
1141 | 1140 | self.ui.warn(_(b"fuzz found when applying patch, stopping\n")) |
|
1142 | 1141 | err = 3 |
|
1143 | 1142 | break |
|
1144 | 1143 | return (err, n) |
|
1145 | 1144 | |
|
1146 | 1145 | def _cleanup(self, patches, numrevs, keep=False): |
|
1147 | 1146 | if not keep: |
|
1148 | 1147 | r = self.qrepo() |
|
1149 | 1148 | if r: |
|
1150 | 1149 | r[None].forget(patches) |
|
1151 | 1150 | for p in patches: |
|
1152 | 1151 | try: |
|
1153 | 1152 | os.unlink(self.join(p)) |
|
1154 | 1153 | except OSError as inst: |
|
1155 | 1154 | if inst.errno != errno.ENOENT: |
|
1156 | 1155 | raise |
|
1157 | 1156 | |
|
1158 | 1157 | qfinished = [] |
|
1159 | 1158 | if numrevs: |
|
1160 | 1159 | qfinished = self.applied[:numrevs] |
|
1161 | 1160 | del self.applied[:numrevs] |
|
1162 | 1161 | self.applieddirty = True |
|
1163 | 1162 | |
|
1164 | 1163 | unknown = [] |
|
1165 | 1164 | |
|
1166 | 1165 | sortedseries = [] |
|
1167 | 1166 | for p in patches: |
|
1168 | 1167 | idx = self.findseries(p) |
|
1169 | 1168 | if idx is None: |
|
1170 | 1169 | sortedseries.append((-1, p)) |
|
1171 | 1170 | else: |
|
1172 | 1171 | sortedseries.append((idx, p)) |
|
1173 | 1172 | |
|
1174 | 1173 | sortedseries.sort(reverse=True) |
|
1175 | 1174 | for (i, p) in sortedseries: |
|
1176 | 1175 | if i != -1: |
|
1177 | 1176 | del self.fullseries[i] |
|
1178 | 1177 | else: |
|
1179 | 1178 | unknown.append(p) |
|
1180 | 1179 | |
|
1181 | 1180 | if unknown: |
|
1182 | 1181 | if numrevs: |
|
1183 | 1182 | rev = {entry.name: entry.node for entry in qfinished} |
|
1184 | 1183 | for p in unknown: |
|
1185 | 1184 | msg = _(b'revision %s refers to unknown patches: %s\n') |
|
1186 | 1185 | self.ui.warn(msg % (short(rev[p]), p)) |
|
1187 | 1186 | else: |
|
1188 | 1187 | msg = _(b'unknown patches: %s\n') |
|
1189 | 1188 | raise error.Abort(b''.join(msg % p for p in unknown)) |
|
1190 | 1189 | |
|
1191 | 1190 | self.parseseries() |
|
1192 | 1191 | self.seriesdirty = True |
|
1193 | 1192 | return [entry.node for entry in qfinished] |
|
1194 | 1193 | |
|
1195 | 1194 | def _revpatches(self, repo, revs): |
|
1196 | 1195 | firstrev = repo[self.applied[0].node].rev() |
|
1197 | 1196 | patches = [] |
|
1198 | 1197 | for i, rev in enumerate(revs): |
|
1199 | 1198 | |
|
1200 | 1199 | if rev < firstrev: |
|
1201 | 1200 | raise error.Abort(_(b'revision %d is not managed') % rev) |
|
1202 | 1201 | |
|
1203 | 1202 | ctx = repo[rev] |
|
1204 | 1203 | base = self.applied[i].node |
|
1205 | 1204 | if ctx.node() != base: |
|
1206 | 1205 | msg = _(b'cannot delete revision %d above applied patches') |
|
1207 | 1206 | raise error.Abort(msg % rev) |
|
1208 | 1207 | |
|
1209 | 1208 | patch = self.applied[i].name |
|
1210 | 1209 | for fmt in (b'[mq]: %s', b'imported patch %s'): |
|
1211 | 1210 | if ctx.description() == fmt % patch: |
|
1212 | 1211 | msg = _(b'patch %s finalized without changeset message\n') |
|
1213 | 1212 | repo.ui.status(msg % patch) |
|
1214 | 1213 | break |
|
1215 | 1214 | |
|
1216 | 1215 | patches.append(patch) |
|
1217 | 1216 | return patches |
|
1218 | 1217 | |
|
1219 | 1218 | def finish(self, repo, revs): |
|
1220 | 1219 | # Manually trigger phase computation to ensure phasedefaults is |
|
1221 | 1220 | # executed before we remove the patches. |
|
1222 | 1221 | repo._phasecache |
|
1223 | 1222 | patches = self._revpatches(repo, sorted(revs)) |
|
1224 | 1223 | qfinished = self._cleanup(patches, len(patches)) |
|
1225 | 1224 | if qfinished and repo.ui.configbool(b'mq', b'secret'): |
|
1226 | 1225 | # only use this logic when the secret option is added |
|
1227 | 1226 | oldqbase = repo[qfinished[0]] |
|
1228 | 1227 | tphase = phases.newcommitphase(repo.ui) |
|
1229 | 1228 | if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase: |
|
1230 | 1229 | with repo.transaction(b'qfinish') as tr: |
|
1231 | 1230 | phases.advanceboundary(repo, tr, tphase, qfinished) |
|
1232 | 1231 | |
|
1233 | 1232 | def delete(self, repo, patches, opts): |
|
1234 | 1233 | if not patches and not opts.get(b'rev'): |
|
1235 | 1234 | raise error.Abort( |
|
1236 | 1235 | _(b'qdelete requires at least one revision or patch name') |
|
1237 | 1236 | ) |
|
1238 | 1237 | |
|
1239 | 1238 | realpatches = [] |
|
1240 | 1239 | for patch in patches: |
|
1241 | 1240 | patch = self.lookup(patch, strict=True) |
|
1242 | 1241 | info = self.isapplied(patch) |
|
1243 | 1242 | if info: |
|
1244 | 1243 | raise error.Abort(_(b"cannot delete applied patch %s") % patch) |
|
1245 | 1244 | if patch not in self.series: |
|
1246 | 1245 | raise error.Abort(_(b"patch %s not in series file") % patch) |
|
1247 | 1246 | if patch not in realpatches: |
|
1248 | 1247 | realpatches.append(patch) |
|
1249 | 1248 | |
|
1250 | 1249 | numrevs = 0 |
|
1251 | 1250 | if opts.get(b'rev'): |
|
1252 | 1251 | if not self.applied: |
|
1253 | 1252 | raise error.Abort(_(b'no patches applied')) |
|
1254 | 1253 | revs = scmutil.revrange(repo, opts.get(b'rev')) |
|
1255 | 1254 | revs.sort() |
|
1256 | 1255 | revpatches = self._revpatches(repo, revs) |
|
1257 | 1256 | realpatches += revpatches |
|
1258 | 1257 | numrevs = len(revpatches) |
|
1259 | 1258 | |
|
1260 | 1259 | self._cleanup(realpatches, numrevs, opts.get(b'keep')) |
|
1261 | 1260 | |
|
1262 | 1261 | def checktoppatch(self, repo): |
|
1263 | 1262 | '''check that working directory is at qtip''' |
|
1264 | 1263 | if self.applied: |
|
1265 | 1264 | top = self.applied[-1].node |
|
1266 | 1265 | patch = self.applied[-1].name |
|
1267 | 1266 | if repo.dirstate.p1() != top: |
|
1268 | 1267 | raise error.Abort(_(b"working directory revision is not qtip")) |
|
1269 | 1268 | return top, patch |
|
1270 | 1269 | return None, None |
|
1271 | 1270 | |
|
1272 | 1271 | def putsubstate2changes(self, substatestate, changes): |
|
1273 | 1272 | if isinstance(changes, list): |
|
1274 | 1273 | mar = changes[:3] |
|
1275 | 1274 | else: |
|
1276 | 1275 | mar = (changes.modified, changes.added, changes.removed) |
|
1277 | 1276 | if any((b'.hgsubstate' in files for files in mar)): |
|
1278 | 1277 | return # already listed |

1279 | 1278 | # not yet listed |
|
1280 | 1279 | if substatestate in b'a?': |
|
1281 | 1280 | mar[1].append(b'.hgsubstate') |
|
1282 | 1281 | elif substatestate in b'r': |
|
1283 | 1282 | mar[2].append(b'.hgsubstate') |
|
1284 | 1283 | else: # modified |
|
1285 | 1284 | mar[0].append(b'.hgsubstate') |
|
1286 | 1285 | |
|
1287 | 1286 | def checklocalchanges(self, repo, force=False, refresh=True): |
|
1288 | 1287 | excsuffix = b'' |
|
1289 | 1288 | if refresh: |
|
1290 | 1289 | excsuffix = b', qrefresh first' |
|
1291 | 1290 | # plain versions for i18n tool to detect them |
|
1292 | 1291 | _(b"local changes found, qrefresh first") |
|
1293 | 1292 | _(b"local changed subrepos found, qrefresh first") |
|
1294 | 1293 | |
|
1295 | 1294 | s = repo.status() |
|
1296 | 1295 | if not force: |
|
1297 | 1296 | cmdutil.checkunfinished(repo) |
|
1298 | 1297 | if s.modified or s.added or s.removed or s.deleted: |
|
1299 | 1298 | _(b"local changes found") # i18n tool detection |
|
1300 | 1299 | raise error.Abort(_(b"local changes found" + excsuffix)) |
|
1301 | 1300 | if checksubstate(repo): |
|
1302 | 1301 | _(b"local changed subrepos found") # i18n tool detection |
|
1303 | 1302 | raise error.Abort( |
|
1304 | 1303 | _(b"local changed subrepos found" + excsuffix) |
|
1305 | 1304 | ) |
|
1306 | 1305 | else: |
|
1307 | 1306 | cmdutil.checkunfinished(repo, skipmerge=True) |
|
1308 | 1307 | return s |
|
1309 | 1308 | |
|
1310 | 1309 | _reserved = (b'series', b'status', b'guards', b'.', b'..') |
|
1311 | 1310 | |
|
1312 | 1311 | def checkreservedname(self, name): |
|
1313 | 1312 | if name in self._reserved: |
|
1314 | 1313 | raise error.Abort( |
|
1315 | 1314 | _(b'"%s" cannot be used as the name of a patch') % name |
|
1316 | 1315 | ) |
|
1317 | 1316 | if name != name.strip(): |
|
1318 | 1317 | # whitespace is stripped by parseseries() |
|
1319 | 1318 | raise error.Abort( |
|
1320 | 1319 | _(b'patch name cannot begin or end with whitespace') |
|
1321 | 1320 | ) |
|
1322 | 1321 | for prefix in (b'.hg', b'.mq'): |
|
1323 | 1322 | if name.startswith(prefix): |
|
1324 | 1323 | raise error.Abort( |
|
1325 | 1324 | _(b'patch name cannot begin with "%s"') % prefix |
|
1326 | 1325 | ) |
|
1327 | 1326 | for c in (b'#', b':', b'\r', b'\n'): |
|
1328 | 1327 | if c in name: |
|
1329 | 1328 | raise error.Abort( |
|
1330 | 1329 | _(b'%r cannot be used in the name of a patch') |
|
1331 | 1330 | % pycompat.bytestr(c) |
|
1332 | 1331 | ) |
|
1333 | 1332 | |
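
For illustration, a few names that checkreservedname above would reject (examples assumed, derived from its rules):

    # b'series'     rejected: reserved name
    # b' fix-bug'   rejected: leading or trailing whitespace
    # b'.hgpatch'   rejected: begins with '.hg' (likewise '.mq')
    # b'fix#1'      rejected: '#', ':', CR and LF are all forbidden
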
|
1334 | 1333 | def checkpatchname(self, name, force=False): |
|
1335 | 1334 | self.checkreservedname(name) |
|
1336 | 1335 | if not force and os.path.exists(self.join(name)): |
|
1337 | 1336 | if os.path.isdir(self.join(name)): |
|
1338 | 1337 | raise error.Abort( |
|
1339 | 1338 | _(b'"%s" already exists as a directory') % name |
|
1340 | 1339 | ) |
|
1341 | 1340 | else: |
|
1342 | 1341 | raise error.Abort(_(b'patch "%s" already exists') % name) |
|
1343 | 1342 | |
|
1344 | 1343 | def makepatchname(self, title, fallbackname): |
|
1345 | 1344 | """Return a suitable filename for title, adding a suffix to make |
|
1346 | 1345 | it unique in the existing list""" |
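
For example (inputs assumed), the regex below lowercases the title and collapses runs of whitespace, punctuation and underscores:

    # makepatchname(b'Fix: crash on load!', b'untitled') -> b'fix_crash_on_load'
    # on a collision the loop below retries with b'fix_crash_on_load__1', ...
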
|
1347 | 1346 | namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_') |
|
1348 | 1347 | namebase = namebase[:75] # avoid too long name (issue5117) |
|
1349 | 1348 | if namebase: |
|
1350 | 1349 | try: |
|
1351 | 1350 | self.checkreservedname(namebase) |
|
1352 | 1351 | except error.Abort: |
|
1353 | 1352 | namebase = fallbackname |
|
1354 | 1353 | else: |
|
1355 | 1354 | namebase = fallbackname |
|
1356 | 1355 | name = namebase |
|
1357 | 1356 | i = 0 |
|
1358 | 1357 | while True: |
|
1359 | 1358 | if name not in self.fullseries: |
|
1360 | 1359 | try: |
|
1361 | 1360 | self.checkpatchname(name) |
|
1362 | 1361 | break |
|
1363 | 1362 | except error.Abort: |
|
1364 | 1363 | pass |
|
1365 | 1364 | i += 1 |
|
1366 | 1365 | name = b'%s__%d' % (namebase, i) |
|
1367 | 1366 | return name |
|
1368 | 1367 | |
|
1369 | 1368 | def checkkeepchanges(self, keepchanges, force): |
|
1370 | 1369 | if force and keepchanges: |
|
1371 | 1370 | raise error.Abort(_(b'cannot use both --force and --keep-changes')) |
|
1372 | 1371 | |
|
1373 | 1372 | def new(self, repo, patchfn, *pats, **opts): |
|
1374 | 1373 | """options: |
|
1375 | 1374 | msg: a string or a no-argument function returning a string |
|
1376 | 1375 | """ |
|
1377 | 1376 | opts = pycompat.byteskwargs(opts) |
|
1378 | 1377 | msg = opts.get(b'msg') |
|
1379 | 1378 | edit = opts.get(b'edit') |
|
1380 | 1379 | editform = opts.get(b'editform', b'mq.qnew') |
|
1381 | 1380 | user = opts.get(b'user') |
|
1382 | 1381 | date = opts.get(b'date') |
|
1383 | 1382 | if date: |
|
1384 | 1383 | date = dateutil.parsedate(date) |
|
1385 | 1384 | diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True) |
|
1386 | 1385 | if opts.get(b'checkname', True): |
|
1387 | 1386 | self.checkpatchname(patchfn) |
|
1388 | 1387 | inclsubs = checksubstate(repo) |
|
1389 | 1388 | if inclsubs: |
|
1390 | 1389 | substatestate = repo.dirstate[b'.hgsubstate'] |
|
1391 | 1390 | if opts.get(b'include') or opts.get(b'exclude') or pats: |
|
1392 | 1391 | # detect missing files in pats |
|
1393 | 1392 | def badfn(f, msg): |
|
1394 | 1393 | if f != b'.hgsubstate': # .hgsubstate is auto-created |
|
1395 | 1394 | raise error.Abort(b'%s: %s' % (f, msg)) |
|
1396 | 1395 | |
|
1397 | 1396 | match = scmutil.match(repo[None], pats, opts, badfn=badfn) |
|
1398 | 1397 | changes = repo.status(match=match) |
|
1399 | 1398 | else: |
|
1400 | 1399 | changes = self.checklocalchanges(repo, force=True) |
|
1401 | 1400 | commitfiles = list(inclsubs) |
|
1402 | 1401 | commitfiles.extend(changes.modified) |
|
1403 | 1402 | commitfiles.extend(changes.added) |
|
1404 | 1403 | commitfiles.extend(changes.removed) |
|
1405 | 1404 | match = scmutil.matchfiles(repo, commitfiles) |
|
1406 | 1405 | if len(repo[None].parents()) > 1: |
|
1407 | 1406 | raise error.Abort(_(b'cannot manage merge changesets')) |
|
1408 | 1407 | self.checktoppatch(repo) |
|
1409 | 1408 | insert = self.fullseriesend() |
|
1410 | 1409 | with repo.wlock(): |
|
1411 | 1410 | try: |
|
1412 | 1411 | # if patch file write fails, abort early |
|
1413 | 1412 | p = self.opener(patchfn, b"w") |
|
1414 | 1413 | except IOError as e: |
|
1415 | 1414 | raise error.Abort( |
|
1416 | 1415 | _(b'cannot write patch "%s": %s') |
|
1417 | 1416 | % (patchfn, encoding.strtolocal(e.strerror)) |
|
1418 | 1417 | ) |
|
1419 | 1418 | try: |
|
1420 | 1419 | defaultmsg = b"[mq]: %s" % patchfn |
|
1421 | 1420 | editor = cmdutil.getcommiteditor(editform=editform) |
|
1422 | 1421 | if edit: |
|
1423 | 1422 | |
|
1424 | 1423 | def finishdesc(desc): |
|
1425 | 1424 | if desc.rstrip(): |
|
1426 | 1425 | return desc |
|
1427 | 1426 | else: |
|
1428 | 1427 | return defaultmsg |
|
1429 | 1428 | |
|
1430 | 1429 | # i18n: this message is shown in editor with "HG: " prefix |
|
1431 | 1430 | extramsg = _(b'Leave message empty to use default message.') |
|
1432 | 1431 | editor = cmdutil.getcommiteditor( |
|
1433 | 1432 | finishdesc=finishdesc, |
|
1434 | 1433 | extramsg=extramsg, |
|
1435 | 1434 | editform=editform, |
|
1436 | 1435 | ) |
|
1437 | 1436 | commitmsg = msg |
|
1438 | 1437 | else: |
|
1439 | 1438 | commitmsg = msg or defaultmsg |
|
1440 | 1439 | |
|
1441 | 1440 | n = newcommit( |
|
1442 | 1441 | repo, |
|
1443 | 1442 | None, |
|
1444 | 1443 | commitmsg, |
|
1445 | 1444 | user, |
|
1446 | 1445 | date, |
|
1447 | 1446 | match=match, |
|
1448 | 1447 | force=True, |
|
1449 | 1448 | editor=editor, |
|
1450 | 1449 | ) |
|
1451 | 1450 | if n is None: |
|
1452 | 1451 | raise error.Abort(_(b"repo commit failed")) |
|
1453 | 1452 | try: |
|
1454 | 1453 | self.fullseries[insert:insert] = [patchfn] |
|
1455 | 1454 | self.applied.append(statusentry(n, patchfn)) |
|
1456 | 1455 | self.parseseries() |
|
1457 | 1456 | self.seriesdirty = True |
|
1458 | 1457 | self.applieddirty = True |
|
1459 | 1458 | nctx = repo[n] |
|
1460 | 1459 | ph = patchheader(self.join(patchfn), self.plainmode) |
|
1461 | 1460 | if user: |
|
1462 | 1461 | ph.setuser(user) |
|
1463 | 1462 | if date: |
|
1464 | 1463 | ph.setdate(b'%d %d' % date) |
|
1465 | 1464 | ph.setparent(hex(nctx.p1().node())) |
|
1466 | 1465 | msg = nctx.description().strip() |
|
1467 | 1466 | if msg == defaultmsg.strip(): |
|
1468 | 1467 | msg = b'' |
|
1469 | 1468 | ph.setmessage(msg) |
|
1470 | 1469 | p.write(bytes(ph)) |
|
1471 | 1470 | if commitfiles: |
|
1472 | 1471 | parent = self.qparents(repo, n) |
|
1473 | 1472 | if inclsubs: |
|
1474 | 1473 | self.putsubstate2changes(substatestate, changes) |
|
1475 | 1474 | chunks = patchmod.diff( |
|
1476 | 1475 | repo, |
|
1477 | 1476 | node1=parent, |
|
1478 | 1477 | node2=n, |
|
1479 | 1478 | changes=changes, |
|
1480 | 1479 | opts=diffopts, |
|
1481 | 1480 | ) |
|
1482 | 1481 | for chunk in chunks: |
|
1483 | 1482 | p.write(chunk) |
|
1484 | 1483 | p.close() |
|
1485 | 1484 | r = self.qrepo() |
|
1486 | 1485 | if r: |
|
1487 | 1486 | r[None].add([patchfn]) |
|
1488 | 1487 | except: # re-raises |
|
1489 | 1488 | repo.rollback() |
|
1490 | 1489 | raise |
|
1491 | 1490 | except Exception: |
|
1492 | 1491 | patchpath = self.join(patchfn) |
|
1493 | 1492 | try: |
|
1494 | 1493 | os.unlink(patchpath) |
|
1495 | 1494 | except OSError: |
|
1496 | 1495 | self.ui.warn(_(b'error unlinking %s\n') % patchpath) |
|
1497 | 1496 | raise |
|
1498 | 1497 | self.removeundo(repo) |
|
1499 | 1498 | |
|
1500 | 1499 | def isapplied(self, patch): |
|
1501 | 1500 | """returns (index, rev, patch)""" |
|
1502 | 1501 | for i, a in enumerate(self.applied): |
|
1503 | 1502 | if a.name == patch: |
|
1504 | 1503 | return (i, a.node, a.name) |
|
1505 | 1504 | return None |
|
1506 | 1505 | |
|
1507 | 1506 | # if the exact patch name does not exist, we try a few |
|
1508 | 1507 | # variations. If strict is passed, we try only #1 |
|
1509 | 1508 | # |
|
1510 | 1509 | # 1) a number (as string) to indicate an offset in the series file |
|
1511 | 1510 | # 2) a unique substring of a patch name in the series file |
|
1512 | 1511 | # 3) patchname[-+]num to indicate an offset in the series file |
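
Assuming a series file containing patch-a, patch-b and patch-c (hypothetical names), these variations resolve roughly as:

    # q.lookup(b'1')          -> b'patch-b'  (offset into the series file)
    # q.lookup(b'tch-c')      -> b'patch-c'  (unique substring match)
    # q.lookup(b'patch-b+1')  -> b'patch-c'  (name plus offset)
    # q.lookup(b'patch-c-2')  -> b'patch-a'  (name minus offset)
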
|
1513 | 1512 | def lookup(self, patch, strict=False): |
|
1514 | 1513 | def partialname(s): |
|
1515 | 1514 | if s in self.series: |
|
1516 | 1515 | return s |
|
1517 | 1516 | matches = [x for x in self.series if s in x] |
|
1518 | 1517 | if len(matches) > 1: |
|
1519 | 1518 | self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s) |
|
1520 | 1519 | for m in matches: |
|
1521 | 1520 | self.ui.warn(b' %s\n' % m) |
|
1522 | 1521 | return None |
|
1523 | 1522 | if matches: |
|
1524 | 1523 | return matches[0] |
|
1525 | 1524 | if self.series and self.applied: |
|
1526 | 1525 | if s == b'qtip': |
|
1527 | 1526 | return self.series[self.seriesend(True) - 1] |
|
1528 | 1527 | if s == b'qbase': |
|
1529 | 1528 | return self.series[0] |
|
1530 | 1529 | return None |
|
1531 | 1530 | |
|
1532 | 1531 | if patch in self.series: |
|
1533 | 1532 | return patch |
|
1534 | 1533 | |
|
1535 | 1534 | if not os.path.isfile(self.join(patch)): |
|
1536 | 1535 | try: |
|
1537 | 1536 | sno = int(patch) |
|
1538 | 1537 | except (ValueError, OverflowError): |
|
1539 | 1538 | pass |
|
1540 | 1539 | else: |
|
1541 | 1540 | if -len(self.series) <= sno < len(self.series): |
|
1542 | 1541 | return self.series[sno] |
|
1543 | 1542 | |
|
1544 | 1543 | if not strict: |
|
1545 | 1544 | res = partialname(patch) |
|
1546 | 1545 | if res: |
|
1547 | 1546 | return res |
|
1548 | 1547 | minus = patch.rfind(b'-') |
|
1549 | 1548 | if minus >= 0: |
|
1550 | 1549 | res = partialname(patch[:minus]) |
|
1551 | 1550 | if res: |
|
1552 | 1551 | i = self.series.index(res) |
|
1553 | 1552 | try: |
|
1554 | 1553 | off = int(patch[minus + 1 :] or 1) |
|
1555 | 1554 | except (ValueError, OverflowError): |
|
1556 | 1555 | pass |
|
1557 | 1556 | else: |
|
1558 | 1557 | if i - off >= 0: |
|
1559 | 1558 | return self.series[i - off] |
|
1560 | 1559 | plus = patch.rfind(b'+') |
|
1561 | 1560 | if plus >= 0: |
|
1562 | 1561 | res = partialname(patch[:plus]) |
|
1563 | 1562 | if res: |
|
1564 | 1563 | i = self.series.index(res) |
|
1565 | 1564 | try: |
|
1566 | 1565 | off = int(patch[plus + 1 :] or 1) |
|
1567 | 1566 | except (ValueError, OverflowError): |
|
1568 | 1567 | pass |
|
1569 | 1568 | else: |
|
1570 | 1569 | if i + off < len(self.series): |
|
1571 | 1570 | return self.series[i + off] |
|
1572 | 1571 | raise error.Abort(_(b"patch %s not in series") % patch) |
|
1573 | 1572 | |
|
1574 | 1573 | def push( |
|
1575 | 1574 | self, |
|
1576 | 1575 | repo, |
|
1577 | 1576 | patch=None, |
|
1578 | 1577 | force=False, |
|
1579 | 1578 | list=False, |
|
1580 | 1579 | mergeq=None, |
|
1581 | 1580 | all=False, |
|
1582 | 1581 | move=False, |
|
1583 | 1582 | exact=False, |
|
1584 | 1583 | nobackup=False, |
|
1585 | 1584 | keepchanges=False, |
|
1586 | 1585 | ): |
|
1587 | 1586 | self.checkkeepchanges(keepchanges, force) |
|
1588 | 1587 | diffopts = self.diffopts() |
|
1589 | 1588 | with repo.wlock(): |
|
1590 | 1589 | heads = [] |
|
1591 | 1590 | for hs in repo.branchmap().iterheads(): |
|
1592 | 1591 | heads.extend(hs) |
|
1593 | 1592 | if not heads: |
|
1594 | heads = [nullid] | |
|
1593 | heads = [repo.nullid] | |
|
1595 | 1594 | if repo.dirstate.p1() not in heads and not exact: |
|
1596 | 1595 | self.ui.status(_(b"(working directory not at a head)\n")) |
|
1597 | 1596 | |
|
1598 | 1597 | if not self.series: |
|
1599 | 1598 | self.ui.warn(_(b'no patches in series\n')) |
|
1600 | 1599 | return 0 |
|
1601 | 1600 | |
|
1602 | 1601 | # Suppose our series file is: A B C and the current 'top' |
|
1603 | 1602 | # patch is B. qpush C should be performed (moving forward) |
|
1604 | 1603 | # qpush B is a NOP (no change); qpush A is an error (can't |
|
1605 | 1604 | # go backwards with qpush) |
|
1606 | 1605 | if patch: |
|
1607 | 1606 | patch = self.lookup(patch) |
|
1608 | 1607 | info = self.isapplied(patch) |
|
1609 | 1608 | if info and info[0] >= len(self.applied) - 1: |
|
1610 | 1609 | self.ui.warn( |
|
1611 | 1610 | _(b'qpush: %s is already at the top\n') % patch |
|
1612 | 1611 | ) |
|
1613 | 1612 | return 0 |
|
1614 | 1613 | |
|
1615 | 1614 | pushable, reason = self.pushable(patch) |
|
1616 | 1615 | if pushable: |
|
1617 | 1616 | if self.series.index(patch) < self.seriesend(): |
|
1618 | 1617 | raise error.Abort( |
|
1619 | 1618 | _(b"cannot push to a previous patch: %s") % patch |
|
1620 | 1619 | ) |
|
1621 | 1620 | else: |
|
1622 | 1621 | if reason: |
|
1623 | 1622 | reason = _(b'guarded by %s') % reason |
|
1624 | 1623 | else: |
|
1625 | 1624 | reason = _(b'no matching guards') |
|
1626 | 1625 | self.ui.warn( |
|
1627 | 1626 | _(b"cannot push '%s' - %s\n") % (patch, reason) |
|
1628 | 1627 | ) |
|
1629 | 1628 | return 1 |
|
1630 | 1629 | elif all: |
|
1631 | 1630 | patch = self.series[-1] |
|
1632 | 1631 | if self.isapplied(patch): |
|
1633 | 1632 | self.ui.warn(_(b'all patches are currently applied\n')) |
|
1634 | 1633 | return 0 |
|
1635 | 1634 | |
|
1636 | 1635 | # Following the above example, starting at 'top' of B: |
|
1637 | 1636 | # qpush should be performed (pushes C), but a subsequent |
|
1638 | 1637 | # qpush without an argument is an error (nothing to |
|
1639 | 1638 | # apply). This allows a loop of "...while hg qpush..." to |
|
1640 | 1639 | # work as it detects an error when done |
|
1641 | 1640 | start = self.seriesend() |
|
1642 | 1641 | if start == len(self.series): |
|
1643 | 1642 | self.ui.warn(_(b'patch series already fully applied\n')) |
|
1644 | 1643 | return 1 |
|
1645 | 1644 | if not force and not keepchanges: |
|
1646 | 1645 | self.checklocalchanges(repo, refresh=self.applied) |
|
1647 | 1646 | |
|
1648 | 1647 | if exact: |
|
1649 | 1648 | if keepchanges: |
|
1650 | 1649 | raise error.Abort( |
|
1651 | 1650 | _(b"cannot use --exact and --keep-changes together") |
|
1652 | 1651 | ) |
|
1653 | 1652 | if move: |
|
1654 | 1653 | raise error.Abort( |
|
1655 | 1654 | _(b'cannot use --exact and --move together') |
|
1656 | 1655 | ) |
|
1657 | 1656 | if self.applied: |
|
1658 | 1657 | raise error.Abort( |
|
1659 | 1658 | _(b'cannot push --exact with applied patches') |
|
1660 | 1659 | ) |
|
1661 | 1660 | root = self.series[start] |
|
1662 | 1661 | target = patchheader(self.join(root), self.plainmode).parent |
|
1663 | 1662 | if not target: |
|
1664 | 1663 | raise error.Abort( |
|
1665 | 1664 | _(b"%s does not have a parent recorded") % root |
|
1666 | 1665 | ) |
|
1667 | 1666 | if not repo[target] == repo[b'.']: |
|
1668 | 1667 | hg.update(repo, target) |
|
1669 | 1668 | |
|
1670 | 1669 | if move: |
|
1671 | 1670 | if not patch: |
|
1672 | 1671 | raise error.Abort(_(b"please specify the patch to move")) |
|
1673 | 1672 | for fullstart, rpn in enumerate(self.fullseries): |
|
1674 | 1673 | # strip markers for patch guards |
|
1675 | 1674 | if self.guard_re.split(rpn, 1)[0] == self.series[start]: |
|
1676 | 1675 | break |
|
1677 | 1676 | for i, rpn in enumerate(self.fullseries[fullstart:]): |
|
1678 | 1677 | # strip markers for patch guards |
|
1679 | 1678 | if self.guard_re.split(rpn, 1)[0] == patch: |
|
1680 | 1679 | break |
|
1681 | 1680 | index = fullstart + i |
|
1682 | 1681 | assert index < len(self.fullseries) |
|
1683 | 1682 | fullpatch = self.fullseries[index] |
|
1684 | 1683 | del self.fullseries[index] |
|
1685 | 1684 | self.fullseries.insert(fullstart, fullpatch) |
|
1686 | 1685 | self.parseseries() |
|
1687 | 1686 | self.seriesdirty = True |
|
1688 | 1687 | |
|
1689 | 1688 | self.applieddirty = True |
|
1690 | 1689 | if start > 0: |
|
1691 | 1690 | self.checktoppatch(repo) |
|
1692 | 1691 | if not patch: |
|
1693 | 1692 | patch = self.series[start] |
|
1694 | 1693 | end = start + 1 |
|
1695 | 1694 | else: |
|
1696 | 1695 | end = self.series.index(patch, start) + 1 |
|
1697 | 1696 | |
|
1698 | 1697 | tobackup = set() |
|
1699 | 1698 | if (not nobackup and force) or keepchanges: |
|
1700 | 1699 | status = self.checklocalchanges(repo, force=True) |
|
1701 | 1700 | if keepchanges: |
|
1702 | 1701 | tobackup.update( |
|
1703 | 1702 | status.modified |
|
1704 | 1703 | + status.added |
|
1705 | 1704 | + status.removed |
|
1706 | 1705 | + status.deleted |
|
1707 | 1706 | ) |
|
1708 | 1707 | else: |
|
1709 | 1708 | tobackup.update(status.modified + status.added) |
|
1710 | 1709 | |
|
1711 | 1710 | s = self.series[start:end] |
|
1712 | 1711 | all_files = set() |
|
1713 | 1712 | try: |
|
1714 | 1713 | if mergeq: |
|
1715 | 1714 | ret = self.mergepatch(repo, mergeq, s, diffopts) |
|
1716 | 1715 | else: |
|
1717 | 1716 | ret = self.apply( |
|
1718 | 1717 | repo, |
|
1719 | 1718 | s, |
|
1720 | 1719 | list, |
|
1721 | 1720 | all_files=all_files, |
|
1722 | 1721 | tobackup=tobackup, |
|
1723 | 1722 | keepchanges=keepchanges, |
|
1724 | 1723 | ) |
|
1725 | 1724 | except AbortNoCleanup: |
|
1726 | 1725 | raise |
|
1727 | 1726 | except: # re-raises |
|
1728 | 1727 | self.ui.warn(_(b'cleaning up working directory...\n')) |
|
1729 | 1728 | cmdutil.revert( |
|
1730 | 1729 | self.ui, |
|
1731 | 1730 | repo, |
|
1732 | 1731 | repo[b'.'], |
|
1733 | 1732 | no_backup=True, |
|
1734 | 1733 | ) |
|
1735 | 1734 | # only remove unknown files that we know we touched or |
|
1736 | 1735 | # created while patching |
|
1737 | 1736 | for f in all_files: |
|
1738 | 1737 | if f not in repo.dirstate: |
|
1739 | 1738 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
1740 | 1739 | self.ui.warn(_(b'done\n')) |
|
1741 | 1740 | raise |
|
1742 | 1741 | |
|
1743 | 1742 | if not self.applied: |
|
1744 | 1743 | return ret[0] |
|
1745 | 1744 | top = self.applied[-1].name |
|
1746 | 1745 | if ret[0] and ret[0] > 1: |
|
1747 | 1746 | msg = _(b"errors during apply, please fix and qrefresh %s\n") |
|
1748 | 1747 | self.ui.write(msg % top) |
|
1749 | 1748 | else: |
|
1750 | 1749 | self.ui.write(_(b"now at: %s\n") % top) |
|
1751 | 1750 | return ret[0] |
|
1752 | 1751 | |
|
1753 | 1752 | def pop( |
|
1754 | 1753 | self, |
|
1755 | 1754 | repo, |
|
1756 | 1755 | patch=None, |
|
1757 | 1756 | force=False, |
|
1758 | 1757 | update=True, |
|
1759 | 1758 | all=False, |
|
1760 | 1759 | nobackup=False, |
|
1761 | 1760 | keepchanges=False, |
|
1762 | 1761 | ): |
|
1763 | 1762 | self.checkkeepchanges(keepchanges, force) |
|
1764 | 1763 | with repo.wlock(): |
|
1765 | 1764 | if patch: |
|
1766 | 1765 | # index, rev, patch |
|
1767 | 1766 | info = self.isapplied(patch) |
|
1768 | 1767 | if not info: |
|
1769 | 1768 | patch = self.lookup(patch) |
|
1770 | 1769 | info = self.isapplied(patch) |
|
1771 | 1770 | if not info: |
|
1772 | 1771 | raise error.Abort(_(b"patch %s is not applied") % patch) |
|
1773 | 1772 | |
|
1774 | 1773 | if not self.applied: |
|
1775 | 1774 | # Allow qpop -a to work repeatedly, |
|
1776 | 1775 | # but not qpop without an argument |
|
1777 | 1776 | self.ui.warn(_(b"no patches applied\n")) |
|
1778 | 1777 | return not all |
|
1779 | 1778 | |
|
1780 | 1779 | if all: |
|
1781 | 1780 | start = 0 |
|
1782 | 1781 | elif patch: |
|
1783 | 1782 | start = info[0] + 1 |
|
1784 | 1783 | else: |
|
1785 | 1784 | start = len(self.applied) - 1 |
|
1786 | 1785 | |
|
1787 | 1786 | if start >= len(self.applied): |
|
1788 | 1787 | self.ui.warn(_(b"qpop: %s is already at the top\n") % patch) |
|
1789 | 1788 | return |
|
1790 | 1789 | |
|
1791 | 1790 | if not update: |
|
1792 | 1791 | parents = repo.dirstate.parents() |
|
1793 | 1792 | rr = [x.node for x in self.applied] |
|
1794 | 1793 | for p in parents: |
|
1795 | 1794 | if p in rr: |
|
1796 | 1795 | self.ui.warn(_(b"qpop: forcing dirstate update\n")) |
|
1797 | 1796 | update = True |
|
1798 | 1797 | else: |
|
1799 | 1798 | parents = [p.node() for p in repo[None].parents()] |
|
1800 | 1799 | update = any( |
|
1801 | 1800 | entry.node in parents for entry in self.applied[start:] |
|
1802 | 1801 | ) |
|
1803 | 1802 | |
|
1804 | 1803 | tobackup = set() |
|
1805 | 1804 | if update: |
|
1806 | 1805 | s = self.checklocalchanges(repo, force=force or keepchanges) |
|
1807 | 1806 | if force: |
|
1808 | 1807 | if not nobackup: |
|
1809 | 1808 | tobackup.update(s.modified + s.added) |
|
1810 | 1809 | elif keepchanges: |
|
1811 | 1810 | tobackup.update( |
|
1812 | 1811 | s.modified + s.added + s.removed + s.deleted |
|
1813 | 1812 | ) |
|
1814 | 1813 | |
|
1815 | 1814 | self.applieddirty = True |
|
1816 | 1815 | end = len(self.applied) |
|
1817 | 1816 | rev = self.applied[start].node |
|
1818 | 1817 | |
|
1819 | 1818 | try: |
|
1820 | 1819 | heads = repo.changelog.heads(rev) |
|
1821 | 1820 | except error.LookupError: |
|
1822 | 1821 | node = short(rev) |
|
1823 | 1822 | raise error.Abort(_(b'trying to pop unknown node %s') % node) |
|
1824 | 1823 | |
|
1825 | 1824 | if heads != [self.applied[-1].node]: |
|
1826 | 1825 | raise error.Abort( |
|
1827 | 1826 | _( |
|
1828 | 1827 | b"popping would remove a revision not " |
|
1829 | 1828 | b"managed by this patch queue" |
|
1830 | 1829 | ) |
|
1831 | 1830 | ) |
|
1832 | 1831 | if not repo[self.applied[-1].node].mutable(): |
|
1833 | 1832 | raise error.Abort( |
|
1834 | 1833 | _(b"popping would remove a public revision"), |
|
1835 | 1834 | hint=_(b"see 'hg help phases' for details"), |
|
1836 | 1835 | ) |
|
1837 | 1836 | |
|
1838 | 1837 | # we know there are no local changes, so we can make a simplified |
|
1839 | 1838 | # form of hg.update. |
|
1840 | 1839 | if update: |
|
1841 | 1840 | qp = self.qparents(repo, rev) |
|
1842 | 1841 | ctx = repo[qp] |
|
1843 | 1842 | st = repo.status(qp, b'.') |
|
1844 | 1843 | m, a, r, d = st.modified, st.added, st.removed, st.deleted |
|
1845 | 1844 | if d: |
|
1846 | 1845 | raise error.Abort(_(b"deletions found between repo revs")) |
|
1847 | 1846 | |
|
1848 | 1847 | tobackup = set(a + m + r) & tobackup |
|
1849 | 1848 | if keepchanges and tobackup: |
|
1850 | 1849 | raise error.Abort(_(b"local changes found, qrefresh first")) |
|
1851 | 1850 | self.backup(repo, tobackup) |
|
1852 | 1851 | with repo.dirstate.parentchange(): |
|
1853 | 1852 | for f in a: |
|
1854 | 1853 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
1855 | 1854 | repo.dirstate.drop(f) |
|
1856 | 1855 | for f in m + r: |
|
1857 | 1856 | fctx = ctx[f] |
|
1858 | 1857 | repo.wwrite(f, fctx.data(), fctx.flags()) |
|
1859 | 1858 | repo.dirstate.normal(f) |
|
1860 | repo.setparents(qp, nullid) | |
|
1859 | repo.setparents(qp, repo.nullid) | |
|
1861 | 1860 | for patch in reversed(self.applied[start:end]): |
|
1862 | 1861 | self.ui.status(_(b"popping %s\n") % patch.name) |
|
1863 | 1862 | del self.applied[start:end] |
|
1864 | 1863 | strip(self.ui, repo, [rev], update=False, backup=False) |
|
1865 | 1864 | for s, state in repo[b'.'].substate.items(): |
|
1866 | 1865 | repo[b'.'].sub(s).get(state) |
|
1867 | 1866 | if self.applied: |
|
1868 | 1867 | self.ui.write(_(b"now at: %s\n") % self.applied[-1].name) |
|
1869 | 1868 | else: |
|
1870 | 1869 | self.ui.write(_(b"patch queue now empty\n")) |
|
1871 | 1870 | |
|
1872 | 1871 | def diff(self, repo, pats, opts): |
|
1873 | 1872 | top, patch = self.checktoppatch(repo) |
|
1874 | 1873 | if not top: |
|
1875 | 1874 | self.ui.write(_(b"no patches applied\n")) |
|
1876 | 1875 | return |
|
1877 | 1876 | qp = self.qparents(repo, top) |
|
1878 | 1877 | if opts.get(b'reverse'): |
|
1879 | 1878 | node1, node2 = None, qp |
|
1880 | 1879 | else: |
|
1881 | 1880 | node1, node2 = qp, None |
|
1882 | 1881 | diffopts = self.diffopts(opts, patch) |
|
1883 | 1882 | self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts) |
|
1884 | 1883 | |
|
1885 | 1884 | def refresh(self, repo, pats=None, **opts): |
|
1886 | 1885 | opts = pycompat.byteskwargs(opts) |
|
1887 | 1886 | if not self.applied: |
|
1888 | 1887 | self.ui.write(_(b"no patches applied\n")) |
|
1889 | 1888 | return 1 |
|
1890 | 1889 | msg = opts.get(b'msg', b'').rstrip() |
|
1891 | 1890 | edit = opts.get(b'edit') |
|
1892 | 1891 | editform = opts.get(b'editform', b'mq.qrefresh') |
|
1893 | 1892 | newuser = opts.get(b'user') |
|
1894 | 1893 | newdate = opts.get(b'date') |
|
1895 | 1894 | if newdate: |
|
1896 | 1895 | newdate = b'%d %d' % dateutil.parsedate(newdate) |
|
1897 | 1896 | wlock = repo.wlock() |
|
1898 | 1897 | |
|
1899 | 1898 | try: |
|
1900 | 1899 | self.checktoppatch(repo) |
|
1901 | 1900 | (top, patchfn) = (self.applied[-1].node, self.applied[-1].name) |
|
1902 | 1901 | if repo.changelog.heads(top) != [top]: |
|
1903 | 1902 | raise error.Abort( |
|
1904 | 1903 | _(b"cannot qrefresh a revision with children") |
|
1905 | 1904 | ) |
|
1906 | 1905 | if not repo[top].mutable(): |
|
1907 | 1906 | raise error.Abort( |
|
1908 | 1907 | _(b"cannot qrefresh public revision"), |
|
1909 | 1908 | hint=_(b"see 'hg help phases' for details"), |
|
1910 | 1909 | ) |
|
1911 | 1910 | |
|
1912 | 1911 | cparents = repo.changelog.parents(top) |
|
1913 | 1912 | patchparent = self.qparents(repo, top) |
|
1914 | 1913 | |
|
1915 | 1914 | inclsubs = checksubstate(repo, patchparent) |
|
1916 | 1915 | if inclsubs: |
|
1917 | 1916 | substatestate = repo.dirstate[b'.hgsubstate'] |
|
1918 | 1917 | |
|
1919 | 1918 | ph = patchheader(self.join(patchfn), self.plainmode) |
|
1920 | 1919 | diffopts = self.diffopts( |
|
1921 | 1920 | {b'git': opts.get(b'git')}, patchfn, plain=True |
|
1922 | 1921 | ) |
|
1923 | 1922 | if newuser: |
|
1924 | 1923 | ph.setuser(newuser) |
|
1925 | 1924 | if newdate: |
|
1926 | 1925 | ph.setdate(newdate) |
|
1927 | 1926 | ph.setparent(hex(patchparent)) |
|
1928 | 1927 | |
|
1929 | 1928 | # only commit new patch when write is complete |
|
1930 | 1929 | patchf = self.opener(patchfn, b'w', atomictemp=True) |
|
1931 | 1930 | |
|
1932 | 1931 | # update the dirstate in place, strip off the qtip commit |
|
1933 | 1932 | # and then commit. |
|
1934 | 1933 | # |
|
1935 | 1934 | # this should really read: |
|
1936 | 1935 | # st = repo.status(top, patchparent) |
|
1937 | 1936 | # but we do it backwards to take advantage of manifest/changelog |
|
1938 | 1937 | # caching against the next repo.status call |
|
1939 | 1938 | st = repo.status(patchparent, top) |
|
1940 | 1939 | mm, aa, dd = st.modified, st.added, st.removed |
|
1941 | 1940 | ctx = repo[top] |
|
1942 | 1941 | aaa = aa[:] |
|
1943 | 1942 | match1 = scmutil.match(repo[None], pats, opts) |
|
1944 | 1943 | # in short mode, we only diff the files included in the |
|
1945 | 1944 | # patch already plus specified files |
|
1946 | 1945 | if opts.get(b'short'): |
|
1947 | 1946 | # if amending a patch, we start with existing |
|
1948 | 1947 | # files plus specified files - unfiltered |
|
1949 | 1948 | match = scmutil.matchfiles(repo, mm + aa + dd + match1.files()) |
|
1950 | 1949 | # filter with include/exclude options |
|
1951 | 1950 | match1 = scmutil.match(repo[None], opts=opts) |
|
1952 | 1951 | else: |
|
1953 | 1952 | match = scmutil.matchall(repo) |
|
1954 | 1953 | stb = repo.status(match=match) |
|
1955 | 1954 | m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted |
|
1956 | 1955 | mm = set(mm) |
|
1957 | 1956 | aa = set(aa) |
|
1958 | 1957 | dd = set(dd) |
|
1959 | 1958 | |
|
1960 | 1959 | # we might end up with files that were added between |
|
1961 | 1960 | # qtip and the dirstate parent, but then changed in the |
|
1962 | 1961 | # local dirstate. in this case, we want them to only |
|
1963 | 1962 | # show up in the added section |
|
1964 | 1963 | for x in m: |
|
1965 | 1964 | if x not in aa: |
|
1966 | 1965 | mm.add(x) |
|
1967 | 1966 | # we might end up with files added by the local dirstate that |
|
1968 | 1967 | # were deleted by the patch. In this case, they should only |
|
1969 | 1968 | # show up in the changed section. |
|
1970 | 1969 | for x in a: |
|
1971 | 1970 | if x in dd: |
|
1972 | 1971 | dd.remove(x) |
|
1973 | 1972 | mm.add(x) |
|
1974 | 1973 | else: |
|
1975 | 1974 | aa.add(x) |
|
1976 | 1975 | # make sure any files deleted in the local dirstate |
|
1977 | 1976 | # are not in the add or change column of the patch |
|
1978 | 1977 | forget = [] |
|
1979 | 1978 | for x in d + r: |
|
1980 | 1979 | if x in aa: |
|
1981 | 1980 | aa.remove(x) |
|
1982 | 1981 | forget.append(x) |
|
1983 | 1982 | continue |
|
1984 | 1983 | else: |
|
1985 | 1984 | mm.discard(x) |
|
1986 | 1985 | dd.add(x) |
|
1987 | 1986 | |
|
1988 | 1987 | m = list(mm) |
|
1989 | 1988 | r = list(dd) |
|
1990 | 1989 | a = list(aa) |
|
1991 | 1990 | |
|
1992 | 1991 | # create 'match' that includes the files to be recommitted. |
|
1993 | 1992 | # apply match1 via repo.status to ensure correct case handling. |
|
1994 | 1993 | st = repo.status(patchparent, match=match1) |
|
1995 | 1994 | cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted |
|
1996 | 1995 | allmatches = set(cm + ca + cr + cd) |
|
1997 | 1996 | refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)] |
|
1998 | 1997 | |
|
1999 | 1998 | files = set(inclsubs) |
|
2000 | 1999 | for x in refreshchanges: |
|
2001 | 2000 | files.update(x) |
|
2002 | 2001 | match = scmutil.matchfiles(repo, files) |
|
2003 | 2002 | |
|
2004 | 2003 | bmlist = repo[top].bookmarks() |
|
2005 | 2004 | |
|
2006 | 2005 | dsguard = None |
|
2007 | 2006 | try: |
|
2008 | 2007 | dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh') |
|
2009 | 2008 | if diffopts.git or diffopts.upgrade: |
|
2010 | 2009 | copies = {} |
|
2011 | 2010 | for dst in a: |
|
2012 | 2011 | src = repo.dirstate.copied(dst) |
|
2013 | 2012 | # during qfold, the source file for copies may |
|
2014 | 2013 | # be removed. Treat this as a simple add. |
|
2015 | 2014 | if src is not None and src in repo.dirstate: |
|
2016 | 2015 | copies.setdefault(src, []).append(dst) |
|
2017 | 2016 | repo.dirstate.add(dst) |
|
2018 | 2017 | # remember the copies between patchparent and qtip |
|
2019 | 2018 | for dst in aaa: |
|
2020 | 2019 | src = ctx[dst].copysource() |
|
2021 | 2020 | if src: |
|
2022 | 2021 | copies.setdefault(src, []).extend( |
|
2023 | 2022 | copies.get(dst, []) |
|
2024 | 2023 | ) |
|
2025 | 2024 | if dst in a: |
|
2026 | 2025 | copies[src].append(dst) |
|
2027 | 2026 | # we can't copy a file created by the patch itself |
|
2028 | 2027 | if dst in copies: |
|
2029 | 2028 | del copies[dst] |
|
2030 | 2029 | for src, dsts in pycompat.iteritems(copies): |
|
2031 | 2030 | for dst in dsts: |
|
2032 | 2031 | repo.dirstate.copy(src, dst) |
|
2033 | 2032 | else: |
|
2034 | 2033 | for dst in a: |
|
2035 | 2034 | repo.dirstate.add(dst) |
|
2036 | 2035 | # Drop useless copy information |
|
2037 | 2036 | for f in list(repo.dirstate.copies()): |
|
2038 | 2037 | repo.dirstate.copy(None, f) |
|
2039 | 2038 | for f in r: |
|
2040 | 2039 | repo.dirstate.remove(f) |
|
2041 | 2040 | # if the patch excludes a modified file, mark that |
|
2042 | 2041 | # file with mtime=0 so status can see it. |
|
2043 | 2042 | mm = [] |
|
2044 | 2043 | for i in pycompat.xrange(len(m) - 1, -1, -1): |
|
2045 | 2044 | if not match1(m[i]): |
|
2046 | 2045 | mm.append(m[i]) |
|
2047 | 2046 | del m[i] |
|
2048 | 2047 | for f in m: |
|
2049 | 2048 | repo.dirstate.normal(f) |
|
2050 | 2049 | for f in mm: |
|
2051 | 2050 | repo.dirstate.normallookup(f) |
|
2052 | 2051 | for f in forget: |
|
2053 | 2052 | repo.dirstate.drop(f) |
|
2054 | 2053 | |
|
2055 | 2054 | user = ph.user or ctx.user() |
|
2056 | 2055 | |
|
2057 | 2056 | oldphase = repo[top].phase() |
|
2058 | 2057 | |
|
2059 | 2058 | # assumes strip can roll itself back if interrupted |
|
2060 | 2059 | repo.setparents(*cparents) |
|
2061 | 2060 | self.applied.pop() |
|
2062 | 2061 | self.applieddirty = True |
|
2063 | 2062 | strip(self.ui, repo, [top], update=False, backup=False) |
|
2064 | 2063 | dsguard.close() |
|
2065 | 2064 | finally: |
|
2066 | 2065 | release(dsguard) |
|
2067 | 2066 | |
|
2068 | 2067 | try: |
|
2069 | 2068 | # might be nice to attempt to roll back strip after this |
|
2070 | 2069 | |
|
2071 | 2070 | defaultmsg = b"[mq]: %s" % patchfn |
|
2072 | 2071 | editor = cmdutil.getcommiteditor(editform=editform) |
|
2073 | 2072 | if edit: |
|
2074 | 2073 | |
|
2075 | 2074 | def finishdesc(desc): |
|
2076 | 2075 | if desc.rstrip(): |
|
2077 | 2076 | ph.setmessage(desc) |
|
2078 | 2077 | return desc |
|
2079 | 2078 | return defaultmsg |
|
2080 | 2079 | |
|
2081 | 2080 | # i18n: this message is shown in editor with "HG: " prefix |
|
2082 | 2081 | extramsg = _(b'Leave message empty to use default message.') |
|
2083 | 2082 | editor = cmdutil.getcommiteditor( |
|
2084 | 2083 | finishdesc=finishdesc, |
|
2085 | 2084 | extramsg=extramsg, |
|
2086 | 2085 | editform=editform, |
|
2087 | 2086 | ) |
|
2088 | 2087 | message = msg or b"\n".join(ph.message) |
|
2089 | 2088 | elif not msg: |
|
2090 | 2089 | if not ph.message: |
|
2091 | 2090 | message = defaultmsg |
|
2092 | 2091 | else: |
|
2093 | 2092 | message = b"\n".join(ph.message) |
|
2094 | 2093 | else: |
|
2095 | 2094 | message = msg |
|
2096 | 2095 | ph.setmessage(msg) |
|
2097 | 2096 | |
|
2098 | 2097 | # Ensure we create a new changeset in the same phase as |
|
2099 | 2098 | # the old one. |
|
2100 | 2099 | lock = tr = None |
|
2101 | 2100 | try: |
|
2102 | 2101 | lock = repo.lock() |
|
2103 | 2102 | tr = repo.transaction(b'mq') |
|
2104 | 2103 | n = newcommit( |
|
2105 | 2104 | repo, |
|
2106 | 2105 | oldphase, |
|
2107 | 2106 | message, |
|
2108 | 2107 | user, |
|
2109 | 2108 | ph.date, |
|
2110 | 2109 | match=match, |
|
2111 | 2110 | force=True, |
|
2112 | 2111 | editor=editor, |
|
2113 | 2112 | ) |
|
2114 | 2113 | # only write patch after a successful commit |
|
2115 | 2114 | c = [list(x) for x in refreshchanges] |
|
2116 | 2115 | if inclsubs: |
|
2117 | 2116 | self.putsubstate2changes(substatestate, c) |
|
2118 | 2117 | chunks = patchmod.diff( |
|
2119 | 2118 | repo, patchparent, changes=c, opts=diffopts |
|
2120 | 2119 | ) |
|
2121 | 2120 | comments = bytes(ph) |
|
2122 | 2121 | if comments: |
|
2123 | 2122 | patchf.write(comments) |
|
2124 | 2123 | for chunk in chunks: |
|
2125 | 2124 | patchf.write(chunk) |
|
2126 | 2125 | patchf.close() |
|
2127 | 2126 | |
|
2128 | 2127 | marks = repo._bookmarks |
|
2129 | 2128 | marks.applychanges(repo, tr, [(bm, n) for bm in bmlist]) |
|
2130 | 2129 | tr.close() |
|
2131 | 2130 | |
|
2132 | 2131 | self.applied.append(statusentry(n, patchfn)) |
|
2133 | 2132 | finally: |
|
2134 | 2133 | lockmod.release(tr, lock) |
|
2135 | 2134 | except: # re-raises |
|
2136 | 2135 | ctx = repo[cparents[0]] |
|
2137 | 2136 | repo.dirstate.rebuild(ctx.node(), ctx.manifest()) |
|
2138 | 2137 | self.savedirty() |
|
2139 | 2138 | self.ui.warn( |
|
2140 | 2139 | _( |
|
2141 | 2140 | b'qrefresh interrupted while patch was popped! ' |
|
2142 | 2141 | b'(revert --all, qpush to recover)\n' |
|
2143 | 2142 | ) |
|
2144 | 2143 | ) |
|
2145 | 2144 | raise |
|
2146 | 2145 | finally: |
|
2147 | 2146 | wlock.release() |
|
2148 | 2147 | self.removeundo(repo) |
|
2149 | 2148 | |
|
2150 | 2149 | def init(self, repo, create=False): |
|
2151 | 2150 | if not create and os.path.isdir(self.path): |
|
2152 | 2151 | raise error.Abort(_(b"patch queue directory already exists")) |
|
2153 | 2152 | try: |
|
2154 | 2153 | os.mkdir(self.path) |
|
2155 | 2154 | except OSError as inst: |
|
2156 | 2155 | if inst.errno != errno.EEXIST or not create: |
|
2157 | 2156 | raise |
|
2158 | 2157 | if create: |
|
2159 | 2158 | return self.qrepo(create=True) |
|
2160 | 2159 | |
|
2161 | 2160 | def unapplied(self, repo, patch=None): |
|
2162 | 2161 | if patch and patch not in self.series: |
|
2163 | 2162 | raise error.Abort(_(b"patch %s is not in series file") % patch) |
|
2164 | 2163 | if not patch: |
|
2165 | 2164 | start = self.seriesend() |
|
2166 | 2165 | else: |
|
2167 | 2166 | start = self.series.index(patch) + 1 |
|
2168 | 2167 | unapplied = [] |
|
2169 | 2168 | for i in pycompat.xrange(start, len(self.series)): |
|
2170 | 2169 | pushable, reason = self.pushable(i) |
|
2171 | 2170 | if pushable: |
|
2172 | 2171 | unapplied.append((i, self.series[i])) |
|
2173 | 2172 | self.explainpushable(i) |
|
2174 | 2173 | return unapplied |
|
2175 | 2174 | |
|
2176 | 2175 | def qseries( |
|
2177 | 2176 | self, |
|
2178 | 2177 | repo, |
|
2179 | 2178 | missing=None, |
|
2180 | 2179 | start=0, |
|
2181 | 2180 | length=None, |
|
2182 | 2181 | status=None, |
|
2183 | 2182 | summary=False, |
|
2184 | 2183 | ): |
|
2185 | 2184 | def displayname(pfx, patchname, state): |
|
2186 | 2185 | if pfx: |
|
2187 | 2186 | self.ui.write(pfx) |
|
2188 | 2187 | if summary: |
|
2189 | 2188 | ph = patchheader(self.join(patchname), self.plainmode) |
|
2190 | 2189 | if ph.message: |
|
2191 | 2190 | msg = ph.message[0] |
|
2192 | 2191 | else: |
|
2193 | 2192 | msg = b'' |
|
2194 | 2193 | |
|
2195 | 2194 | if self.ui.formatted(): |
|
2196 | 2195 | width = self.ui.termwidth() - len(pfx) - len(patchname) - 2 |
|
2197 | 2196 | if width > 0: |
|
2198 | 2197 | msg = stringutil.ellipsis(msg, width) |
|
2199 | 2198 | else: |
|
2200 | 2199 | msg = b'' |
|
2201 | 2200 | self.ui.write(patchname, label=b'qseries.' + state) |
|
2202 | 2201 | self.ui.write(b': ') |
|
2203 | 2202 | self.ui.write(msg, label=b'qseries.message.' + state) |
|
2204 | 2203 | else: |
|
2205 | 2204 | self.ui.write(patchname, label=b'qseries.' + state) |
|
2206 | 2205 | self.ui.write(b'\n') |
|
2207 | 2206 | |
|
2208 | 2207 | applied = {p.name for p in self.applied} |
|
2209 | 2208 | if length is None: |
|
2210 | 2209 | length = len(self.series) - start |
|
2211 | 2210 | if not missing: |
|
2212 | 2211 | if self.ui.verbose: |
|
2213 | 2212 | idxwidth = len(b"%d" % (start + length - 1)) |
|
2214 | 2213 | for i in pycompat.xrange(start, start + length): |
|
2215 | 2214 | patch = self.series[i] |
|
2216 | 2215 | if patch in applied: |
|
2217 | 2216 | char, state = b'A', b'applied' |
|
2218 | 2217 | elif self.pushable(i)[0]: |
|
2219 | 2218 | char, state = b'U', b'unapplied' |
|
2220 | 2219 | else: |
|
2221 | 2220 | char, state = b'G', b'guarded' |
|
2222 | 2221 | pfx = b'' |
|
2223 | 2222 | if self.ui.verbose: |
|
2224 | 2223 | pfx = b'%*d %s ' % (idxwidth, i, char) |
|
2225 | 2224 | elif status and status != char: |
|
2226 | 2225 | continue |
|
2227 | 2226 | displayname(pfx, patch, state) |
|
2228 | 2227 | else: |
|
2229 | 2228 | msng_list = [] |
|
2230 | 2229 | for root, dirs, files in os.walk(self.path): |
|
2231 | 2230 | d = root[len(self.path) + 1 :] |
|
2232 | 2231 | for f in files: |
|
2233 | 2232 | fl = os.path.join(d, f) |
|
2234 | 2233 | if ( |
|
2235 | 2234 | fl not in self.series |
|
2236 | 2235 | and fl |
|
2237 | 2236 | not in ( |
|
2238 | 2237 | self.statuspath, |
|
2239 | 2238 | self.seriespath, |
|
2240 | 2239 | self.guardspath, |
|
2241 | 2240 | ) |
|
2242 | 2241 | and not fl.startswith(b'.') |
|
2243 | 2242 | ): |
|
2244 | 2243 | msng_list.append(fl) |
|
2245 | 2244 | for x in sorted(msng_list): |
|
2246 | 2245 | pfx = self.ui.verbose and b'D ' or b'' |
|
2247 | 2246 | displayname(pfx, x, b'missing') |
|
2248 | 2247 | |
|
2249 | 2248 | def issaveline(self, l): |
|
2250 | 2249 | if l.name == b'.hg.patches.save.line': |
|
2251 | 2250 | return True |
|
2252 | 2251 | |
|
2253 | 2252 | def qrepo(self, create=False): |
|
2254 | 2253 | ui = self.baseui.copy() |
|
2255 | 2254 | # copy back attributes set by ui.pager() |
|
2256 | 2255 | if self.ui.pageractive and not ui.pageractive: |
|
2257 | 2256 | ui.pageractive = self.ui.pageractive |
|
2258 | 2257 | # internal config: ui.formatted |
|
2259 | 2258 | ui.setconfig( |
|
2260 | 2259 | b'ui', |
|
2261 | 2260 | b'formatted', |
|
2262 | 2261 | self.ui.config(b'ui', b'formatted'), |
|
2263 | 2262 | b'mqpager', |
|
2264 | 2263 | ) |
|
2265 | 2264 | ui.setconfig( |
|
2266 | 2265 | b'ui', |
|
2267 | 2266 | b'interactive', |
|
2268 | 2267 | self.ui.config(b'ui', b'interactive'), |
|
2269 | 2268 | b'mqpager', |
|
2270 | 2269 | ) |
|
2271 | 2270 | if create or os.path.isdir(self.join(b".hg")): |
|
2272 | 2271 | return hg.repository(ui, path=self.path, create=create) |
|
2273 | 2272 | |
|
2274 | 2273 | def restore(self, repo, rev, delete=None, qupdate=None): |
|
2275 | 2274 | desc = repo[rev].description().strip() |
|
2276 | 2275 | lines = desc.splitlines() |
|
2277 | 2276 | datastart = None |
|
2278 | 2277 | series = [] |
|
2279 | 2278 | applied = [] |
|
2280 | 2279 | qpp = None |
|
2281 | 2280 | for i, line in enumerate(lines): |
|
2282 | 2281 | if line == b'Patch Data:': |
|
2283 | 2282 | datastart = i + 1 |
|
2284 | 2283 | elif line.startswith(b'Dirstate:'): |
|
2285 | 2284 | l = line.rstrip() |
|
2286 | 2285 | l = l[10:].split(b' ') |
|
2287 | 2286 | qpp = [bin(x) for x in l] |
|
2288 | 2287 | elif datastart is not None: |
|
2289 | 2288 | l = line.rstrip() |
|
2290 | 2289 | n, name = l.split(b':', 1) |
|
2291 | 2290 | if n: |
|
2292 | 2291 | applied.append(statusentry(bin(n), name)) |
|
2293 | 2292 | else: |
|
2294 | 2293 | series.append(l) |
|
2295 | 2294 | if datastart is None: |
|
2296 | 2295 | self.ui.warn(_(b"no saved patch data found\n")) |
|
2297 | 2296 | return 1 |
|
2298 | 2297 | self.ui.warn(_(b"restoring status: %s\n") % lines[0]) |
|
2299 | 2298 | self.fullseries = series |
|
2300 | 2299 | self.applied = applied |
|
2301 | 2300 | self.parseseries() |
|
2302 | 2301 | self.seriesdirty = True |
|
2303 | 2302 | self.applieddirty = True |
|
2304 | 2303 | heads = repo.changelog.heads() |
|
2305 | 2304 | if delete: |
|
2306 | 2305 | if rev not in heads: |
|
2307 | 2306 | self.ui.warn(_(b"save entry has children, leaving it alone\n")) |
|
2308 | 2307 | else: |
|
2309 | 2308 | self.ui.warn(_(b"removing save entry %s\n") % short(rev)) |
|
2310 | 2309 | pp = repo.dirstate.parents() |
|
2311 | 2310 | if rev in pp: |
|
2312 | 2311 | update = True |
|
2313 | 2312 | else: |
|
2314 | 2313 | update = False |
|
2315 | 2314 | strip(self.ui, repo, [rev], update=update, backup=False) |
|
2316 | 2315 | if qpp: |
|
2317 | 2316 | self.ui.warn( |
|
2318 | 2317 | _(b"saved queue repository parents: %s %s\n") |
|
2319 | 2318 | % (short(qpp[0]), short(qpp[1])) |
|
2320 | 2319 | ) |
|
2321 | 2320 | if qupdate: |
|
2322 | 2321 | self.ui.status(_(b"updating queue directory\n")) |
|
2323 | 2322 | r = self.qrepo() |
|
2324 | 2323 | if not r: |
|
2325 | 2324 | self.ui.warn(_(b"unable to load queue repository\n")) |
|
2326 | 2325 | return 1 |
|
2327 | 2326 | hg.clean(r, qpp[0]) |
|
2328 | 2327 | |
|
2329 | 2328 | def save(self, repo, msg=None): |
|
2330 | 2329 | if not self.applied: |
|
2331 | 2330 | self.ui.warn(_(b"save: no patches applied, exiting\n")) |
|
2332 | 2331 | return 1 |
|
2333 | 2332 | if self.issaveline(self.applied[-1]): |
|
2334 | 2333 | self.ui.warn(_(b"status is already saved\n")) |
|
2335 | 2334 | return 1 |
|
2336 | 2335 | |
|
2337 | 2336 | if not msg: |
|
2338 | 2337 | msg = _(b"hg patches saved state") |
|
2339 | 2338 | else: |
|
2340 | 2339 | msg = b"hg patches: " + msg.rstrip(b'\r\n') |
|
2341 | 2340 | r = self.qrepo() |
|
2342 | 2341 | if r: |
|
2343 | 2342 | pp = r.dirstate.parents() |
|
2344 | 2343 | msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1])) |
|
2345 | 2344 | msg += b"\n\nPatch Data:\n" |
|
2346 | 2345 | msg += b''.join(b'%s\n' % x for x in self.applied) |
|
2347 | 2346 | msg += b''.join(b':%s\n' % x for x in self.fullseries) |
|
2348 | 2347 | n = repo.commit(msg, force=True) |
|
2349 | 2348 | if not n: |
|
2350 | 2349 | self.ui.warn(_(b"repo commit failed\n")) |
|
2351 | 2350 | return 1 |
|
2352 | 2351 | self.applied.append(statusentry(n, b'.hg.patches.save.line')) |
|
2353 | 2352 | self.applieddirty = True |
|
2354 | 2353 | self.removeundo(repo) |
|
2355 | 2354 | |
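save() is the writer side of the same format. A sketch of how the message is assembled (plain strings again; the real code renders dirstate parents with hex() and applied entries via statusentry's ``node:name`` form, and only emits the Dirstate line when a queue repository exists)::

    def build_save_message(msg, dirstate_parents, applied, fullseries):
        """Compose a saved-state commit message like save() above."""
        text = ('hg patches: ' + msg.rstrip('\r\n')
                if msg else 'hg patches saved state')
        if dirstate_parents:
            text += '\nDirstate: %s %s' % dirstate_parents
        text += '\n\nPatch Data:\n'
        text += ''.join('%s:%s\n' % e for e in applied)
        text += ''.join(':%s\n' % name for name in fullseries)
        return text

    print(build_save_message(None, ('1111', '2222'),
                             [('1234abcd', 'first.patch')],
                             ['second.patch']))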
|
2356 | 2355 | def fullseriesend(self): |
|
2357 | 2356 | if self.applied: |
|
2358 | 2357 | p = self.applied[-1].name |
|
2359 | 2358 | end = self.findseries(p) |
|
2360 | 2359 | if end is None: |
|
2361 | 2360 | return len(self.fullseries) |
|
2362 | 2361 | return end + 1 |
|
2363 | 2362 | return 0 |
|
2364 | 2363 | |
|
2365 | 2364 | def seriesend(self, all_patches=False): |
|
2366 | 2365 | """If all_patches is False, return the index of the next pushable patch |
|
2367 | 2366 | in the series, or the series length. If all_patches is True, return the |
|
2368 | 2367 | index of the first patch past the last applied one. |
|
2369 | 2368 | """ |
|
2370 | 2369 | end = 0 |
|
2371 | 2370 | |
|
2372 | 2371 | def nextpatch(start): |
|
2373 | 2372 | if all_patches or start >= len(self.series): |
|
2374 | 2373 | return start |
|
2375 | 2374 | for i in pycompat.xrange(start, len(self.series)): |
|
2376 | 2375 | p, reason = self.pushable(i) |
|
2377 | 2376 | if p: |
|
2378 | 2377 | return i |
|
2379 | 2378 | self.explainpushable(i) |
|
2380 | 2379 | return len(self.series) |
|
2381 | 2380 | |
|
2382 | 2381 | if self.applied: |
|
2383 | 2382 | p = self.applied[-1].name |
|
2384 | 2383 | try: |
|
2385 | 2384 | end = self.series.index(p) |
|
2386 | 2385 | except ValueError: |
|
2387 | 2386 | return 0 |
|
2388 | 2387 | return nextpatch(end + 1) |
|
2389 | 2388 | return nextpatch(end) |
|
2390 | 2389 | |
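seriesend() above reduces to: find the position just past the last applied patch, then (unless all_patches is set) scan forward to the first pushable entry. A self-contained sketch with pushability supplied as a plain predicate (the real check is queue.pushable(), which evaluates guards)::

    def seriesend(series, applied, pushable, all_patches=False):
        """Index of the next pushable patch, mirroring seriesend() above.

        series: list of patch names; applied: applied names in order;
        pushable: callable(name) -> bool, standing in for the guard check.
        """
        start = 0
        if applied:
            try:
                start = series.index(applied[-1]) + 1
            except ValueError:
                return 0
        if all_patches or start >= len(series):
            return start
        for i in range(start, len(series)):
            if pushable(series[i]):
                return i
        return len(series)

    series = ['a.patch', 'b.patch', 'c.patch']
    # 'b.patch' is guarded out, so the next pushable patch is index 2:
    print(seriesend(series, ['a.patch'], lambda name: name != 'b.patch'))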
|
2391 | 2390 | def appliedname(self, index): |
|
2392 | 2391 | pname = self.applied[index].name |
|
2393 | 2392 | if not self.ui.verbose: |
|
2394 | 2393 | p = pname |
|
2395 | 2394 | else: |
|
2396 | 2395 | p = (b"%d" % self.series.index(pname)) + b" " + pname |
|
2397 | 2396 | return p |
|
2398 | 2397 | |
|
2399 | 2398 | def qimport( |
|
2400 | 2399 | self, |
|
2401 | 2400 | repo, |
|
2402 | 2401 | files, |
|
2403 | 2402 | patchname=None, |
|
2404 | 2403 | rev=None, |
|
2405 | 2404 | existing=None, |
|
2406 | 2405 | force=None, |
|
2407 | 2406 | git=False, |
|
2408 | 2407 | ): |
|
2409 | 2408 | def checkseries(patchname): |
|
2410 | 2409 | if patchname in self.series: |
|
2411 | 2410 | raise error.Abort( |
|
2412 | 2411 | _(b'patch %s is already in the series file') % patchname |
|
2413 | 2412 | ) |
|
2414 | 2413 | |
|
2415 | 2414 | if rev: |
|
2416 | 2415 | if files: |
|
2417 | 2416 | raise error.Abort( |
|
2418 | 2417 | _(b'option "-r" not valid when importing files') |
|
2419 | 2418 | ) |
|
2420 | 2419 | rev = scmutil.revrange(repo, rev) |
|
2421 | 2420 | rev.sort(reverse=True) |
|
2422 | 2421 | elif not files: |
|
2423 | 2422 | raise error.Abort(_(b'no files or revisions specified')) |
|
2424 | 2423 | if (len(files) > 1 or len(rev) > 1) and patchname: |
|
2425 | 2424 | raise error.Abort( |
|
2426 | 2425 | _(b'option "-n" not valid when importing multiple patches') |
|
2427 | 2426 | ) |
|
2428 | 2427 | imported = [] |
|
2429 | 2428 | if rev: |
|
2430 | 2429 | # If mq patches are applied, we can only import revisions |
|
2431 | 2430 | # that form a linear path to qbase. |
|
2432 | 2431 | # Otherwise, they should form a linear path to a head. |
|
2433 | 2432 | heads = repo.changelog.heads(repo.changelog.node(rev.first())) |
|
2434 | 2433 | if len(heads) > 1: |
|
2435 | 2434 | raise error.Abort( |
|
2436 | 2435 | _(b'revision %d is the root of more than one branch') |
|
2437 | 2436 | % rev.last() |
|
2438 | 2437 | ) |
|
2439 | 2438 | if self.applied: |
|
2440 | 2439 | base = repo.changelog.node(rev.first()) |
|
2441 | 2440 | if base in [n.node for n in self.applied]: |
|
2442 | 2441 | raise error.Abort( |
|
2443 | 2442 | _(b'revision %d is already managed') % rev.first() |
|
2444 | 2443 | ) |
|
2445 | 2444 | if heads != [self.applied[-1].node]: |
|
2446 | 2445 | raise error.Abort( |
|
2447 | 2446 | _(b'revision %d is not the parent of the queue') |
|
2448 | 2447 | % rev.first() |
|
2449 | 2448 | ) |
|
2450 | 2449 | base = repo.changelog.rev(self.applied[0].node) |
|
2451 | 2450 | lastparent = repo.changelog.parentrevs(base)[0] |
|
2452 | 2451 | else: |
|
2453 | 2452 | if heads != [repo.changelog.node(rev.first())]: |
|
2454 | 2453 | raise error.Abort( |
|
2455 | 2454 | _(b'revision %d has unmanaged children') % rev.first() |
|
2456 | 2455 | ) |
|
2457 | 2456 | lastparent = None |
|
2458 | 2457 | |
|
2459 | 2458 | diffopts = self.diffopts({b'git': git}) |
|
2460 | 2459 | with repo.transaction(b'qimport') as tr: |
|
2461 | 2460 | for r in rev: |
|
2462 | 2461 | if not repo[r].mutable(): |
|
2463 | 2462 | raise error.Abort( |
|
2464 | 2463 | _(b'revision %d is not mutable') % r, |
|
2465 | 2464 | hint=_(b"see 'hg help phases' " b'for details'), |
|
2466 | 2465 | ) |
|
2467 | 2466 | p1, p2 = repo.changelog.parentrevs(r) |
|
2468 | 2467 | n = repo.changelog.node(r) |
|
2469 | 2468 | if p2 != nullrev: |
|
2470 | 2469 | raise error.Abort( |
|
2471 | 2470 | _(b'cannot import merge revision %d') % r |
|
2472 | 2471 | ) |
|
2473 | 2472 | if lastparent and lastparent != r: |
|
2474 | 2473 | raise error.Abort( |
|
2475 | 2474 | _(b'revision %d is not the parent of %d') |
|
2476 | 2475 | % (r, lastparent) |
|
2477 | 2476 | ) |
|
2478 | 2477 | lastparent = p1 |
|
2479 | 2478 | |
|
2480 | 2479 | if not patchname: |
|
2481 | 2480 | patchname = self.makepatchname( |
|
2482 | 2481 | repo[r].description().split(b'\n', 1)[0], |
|
2483 | 2482 | b'%d.diff' % r, |
|
2484 | 2483 | ) |
|
2485 | 2484 | checkseries(patchname) |
|
2486 | 2485 | self.checkpatchname(patchname, force) |
|
2487 | 2486 | self.fullseries.insert(0, patchname) |
|
2488 | 2487 | |
|
2489 | 2488 | with self.opener(patchname, b"w") as fp: |
|
2490 | 2489 | cmdutil.exportfile(repo, [n], fp, opts=diffopts) |
|
2491 | 2490 | |
|
2492 | 2491 | se = statusentry(n, patchname) |
|
2493 | 2492 | self.applied.insert(0, se) |
|
2494 | 2493 | |
|
2495 | 2494 | self.added.append(patchname) |
|
2496 | 2495 | imported.append(patchname) |
|
2497 | 2496 | patchname = None |
|
2498 | 2497 | if rev and repo.ui.configbool(b'mq', b'secret'): |
|
2499 | 2498 | # if we added anything with --rev, move the secret root |
|
2500 | 2499 | phases.retractboundary(repo, tr, phases.secret, [n]) |
|
2501 | 2500 | self.parseseries() |
|
2502 | 2501 | self.applieddirty = True |
|
2503 | 2502 | self.seriesdirty = True |
|
2504 | 2503 | |
|
2505 | 2504 | for i, filename in enumerate(files): |
|
2506 | 2505 | if existing: |
|
2507 | 2506 | if filename == b'-': |
|
2508 | 2507 | raise error.Abort( |
|
2509 | 2508 | _(b'-e is incompatible with import from -') |
|
2510 | 2509 | ) |
|
2511 | 2510 | filename = normname(filename) |
|
2512 | 2511 | self.checkreservedname(filename) |
|
2513 | 2512 | if urlutil.url(filename).islocal(): |
|
2514 | 2513 | originpath = self.join(filename) |
|
2515 | 2514 | if not os.path.isfile(originpath): |
|
2516 | 2515 | raise error.Abort( |
|
2517 | 2516 | _(b"patch %s does not exist") % filename |
|
2518 | 2517 | ) |
|
2519 | 2518 | |
|
2520 | 2519 | if patchname: |
|
2521 | 2520 | self.checkpatchname(patchname, force) |
|
2522 | 2521 | |
|
2523 | 2522 | self.ui.write( |
|
2524 | 2523 | _(b'renaming %s to %s\n') % (filename, patchname) |
|
2525 | 2524 | ) |
|
2526 | 2525 | util.rename(originpath, self.join(patchname)) |
|
2527 | 2526 | else: |
|
2528 | 2527 | patchname = filename |
|
2529 | 2528 | |
|
2530 | 2529 | else: |
|
2531 | 2530 | if filename == b'-' and not patchname: |
|
2532 | 2531 | raise error.Abort( |
|
2533 | 2532 | _(b'need --name to import a patch from -') |
|
2534 | 2533 | ) |
|
2535 | 2534 | elif not patchname: |
|
2536 | 2535 | patchname = normname( |
|
2537 | 2536 | os.path.basename(filename.rstrip(b'/')) |
|
2538 | 2537 | ) |
|
2539 | 2538 | self.checkpatchname(patchname, force) |
|
2540 | 2539 | try: |
|
2541 | 2540 | if filename == b'-': |
|
2542 | 2541 | text = self.ui.fin.read() |
|
2543 | 2542 | else: |
|
2544 | 2543 | fp = hg.openpath(self.ui, filename) |
|
2545 | 2544 | text = fp.read() |
|
2546 | 2545 | fp.close() |
|
2547 | 2546 | except (OSError, IOError): |
|
2548 | 2547 | raise error.Abort(_(b"unable to read file %s") % filename) |
|
2549 | 2548 | patchf = self.opener(patchname, b"w") |
|
2550 | 2549 | patchf.write(text) |
|
2551 | 2550 | patchf.close() |
|
2552 | 2551 | if not force: |
|
2553 | 2552 | checkseries(patchname) |
|
2554 | 2553 | if patchname not in self.series: |
|
2555 | 2554 | index = self.fullseriesend() + i |
|
2556 | 2555 | self.fullseries[index:index] = [patchname] |
|
2557 | 2556 | self.parseseries() |
|
2558 | 2557 | self.seriesdirty = True |
|
2559 | 2558 | self.ui.warn(_(b"adding %s to series file\n") % patchname) |
|
2560 | 2559 | self.added.append(patchname) |
|
2561 | 2560 | imported.append(patchname) |
|
2562 | 2561 | patchname = None |
|
2563 | 2562 | |
|
2564 | 2563 | self.removeundo(repo) |
|
2565 | 2564 | return imported |
|
2566 | 2565 | |
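The --rev branch of qimport() above enforces linearity while walking the revisions newest-first: merge revisions are rejected, and each revision must be the first parent of the previously imported one. A toy sketch of that check with a plain parent map (the original compares a possibly-unset ``lastparent`` with a truthiness test; ``is not None`` is used here)::

    NULLREV = -1

    def check_linear(revs, parents):
        """Reject merges and non-linear runs, like qimport's --rev loop.

        revs: revision numbers sorted newest-first;
        parents: mapping rev -> (p1, p2).
        """
        lastparent = None
        for r in revs:
            p1, p2 = parents[r]
            if p2 != NULLREV:
                raise ValueError('cannot import merge revision %d' % r)
            if lastparent is not None and lastparent != r:
                raise ValueError('revision %d is not the parent of %d'
                                 % (r, lastparent))
            lastparent = p1
        return True

    # 0 <- 1 <- 2 is linear, so importing [2, 1] succeeds:
    parents = {0: (NULLREV, NULLREV), 1: (0, NULLREV), 2: (1, NULLREV)}
    print(check_linear([2, 1], parents))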
|
2567 | 2566 | |
|
2568 | 2567 | def fixkeepchangesopts(ui, opts): |
|
2569 | 2568 | if ( |
|
2570 | 2569 | not ui.configbool(b'mq', b'keepchanges') |
|
2571 | 2570 | or opts.get(b'force') |
|
2572 | 2571 | or opts.get(b'exact') |
|
2573 | 2572 | ): |
|
2574 | 2573 | return opts |
|
2575 | 2574 | opts = dict(opts) |
|
2576 | 2575 | opts[b'keep_changes'] = True |
|
2577 | 2576 | return opts |
|
2578 | 2577 | |
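fixkeepchangesopts() above makes the ``mq.keepchanges`` config behave like an implicit --keep-changes, except when --force or --exact explicitly overrides it. The same rule as a plain-dict sketch::

    def fix_keepchanges(keepchanges_cfg, opts):
        """Config-driven default for --keep-changes (see above)."""
        if not keepchanges_cfg or opts.get('force') or opts.get('exact'):
            return opts
        opts = dict(opts)
        opts['keep_changes'] = True
        return opts

    print(fix_keepchanges(True, {'force': True}))  # unchanged: --force wins
    print(fix_keepchanges(True, {}))               # {'keep_changes': True}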
|
2579 | 2578 | |
|
2580 | 2579 | @command( |
|
2581 | 2580 | b"qdelete|qremove|qrm", |
|
2582 | 2581 | [ |
|
2583 | 2582 | (b'k', b'keep', None, _(b'keep patch file')), |
|
2584 | 2583 | ( |
|
2585 | 2584 | b'r', |
|
2586 | 2585 | b'rev', |
|
2587 | 2586 | [], |
|
2588 | 2587 | _(b'stop managing a revision (DEPRECATED)'), |
|
2589 | 2588 | _(b'REV'), |
|
2590 | 2589 | ), |
|
2591 | 2590 | ], |
|
2592 | 2591 | _(b'hg qdelete [-k] [PATCH]...'), |
|
2593 | 2592 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2594 | 2593 | ) |
|
2595 | 2594 | def delete(ui, repo, *patches, **opts): |
|
2596 | 2595 | """remove patches from queue |
|
2597 | 2596 | |
|
2598 | 2597 | The patches must not be applied, and at least one patch is required. Exact |
|
2599 | 2598 | patch identifiers must be given. With -k/--keep, the patch files are |
|
2600 | 2599 | preserved in the patch directory. |
|
2601 | 2600 | |
|
2602 | 2601 | To stop managing a patch and move it into permanent history, |
|
2603 | 2602 | use the :hg:`qfinish` command.""" |
|
2604 | 2603 | q = repo.mq |
|
2605 | 2604 | q.delete(repo, patches, pycompat.byteskwargs(opts)) |
|
2606 | 2605 | q.savedirty() |
|
2607 | 2606 | return 0 |
|
2608 | 2607 | |
|
2609 | 2608 | |
|
2610 | 2609 | @command( |
|
2611 | 2610 | b"qapplied", |
|
2612 | 2611 | [(b'1', b'last', None, _(b'show only the preceding applied patch'))] |
|
2613 | 2612 | + seriesopts, |
|
2614 | 2613 | _(b'hg qapplied [-1] [-s] [PATCH]'), |
|
2615 | 2614 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2616 | 2615 | ) |
|
2617 | 2616 | def applied(ui, repo, patch=None, **opts): |
|
2618 | 2617 | """print the patches already applied |
|
2619 | 2618 | |
|
2620 | 2619 | Returns 0 on success.""" |
|
2621 | 2620 | |
|
2622 | 2621 | q = repo.mq |
|
2623 | 2622 | opts = pycompat.byteskwargs(opts) |
|
2624 | 2623 | |
|
2625 | 2624 | if patch: |
|
2626 | 2625 | if patch not in q.series: |
|
2627 | 2626 | raise error.Abort(_(b"patch %s is not in series file") % patch) |
|
2628 | 2627 | end = q.series.index(patch) + 1 |
|
2629 | 2628 | else: |
|
2630 | 2629 | end = q.seriesend(True) |
|
2631 | 2630 | |
|
2632 | 2631 | if opts.get(b'last') and not end: |
|
2633 | 2632 | ui.write(_(b"no patches applied\n")) |
|
2634 | 2633 | return 1 |
|
2635 | 2634 | elif opts.get(b'last') and end == 1: |
|
2636 | 2635 | ui.write(_(b"only one patch applied\n")) |
|
2637 | 2636 | return 1 |
|
2638 | 2637 | elif opts.get(b'last'): |
|
2639 | 2638 | start = end - 2 |
|
2640 | 2639 | end = 1 |
|
2641 | 2640 | else: |
|
2642 | 2641 | start = 0 |
|
2643 | 2642 | |
|
2644 | 2643 | q.qseries( |
|
2645 | 2644 | repo, length=end, start=start, status=b'A', summary=opts.get(b'summary') |
|
2646 | 2645 | ) |
|
2647 | 2646 | |
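The --last arithmetic in applied() above: with ``end`` applied patches, it prints one series entry starting at index ``end - 2``, i.e. the patch just below the top of the stack. A sketch of the (start, length) computation::

    def qapplied_range(applied_count, last=False):
        """(start, length) of the series slice qapplied prints."""
        if not last:
            return (0, applied_count)
        if applied_count < 2:
            return None  # "no patches applied" / "only one patch applied"
        return (applied_count - 2, 1)

    print(qapplied_range(3))             # (0, 3): every applied patch
    print(qapplied_range(3, last=True))  # (1, 1): the one below the top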
|
2648 | 2647 | |
|
2649 | 2648 | @command( |
|
2650 | 2649 | b"qunapplied", |
|
2651 | 2650 | [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts, |
|
2652 | 2651 | _(b'hg qunapplied [-1] [-s] [PATCH]'), |
|
2653 | 2652 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2654 | 2653 | ) |
|
2655 | 2654 | def unapplied(ui, repo, patch=None, **opts): |
|
2656 | 2655 | """print the patches not yet applied |
|
2657 | 2656 | |
|
2658 | 2657 | Returns 0 on success.""" |
|
2659 | 2658 | |
|
2660 | 2659 | q = repo.mq |
|
2661 | 2660 | opts = pycompat.byteskwargs(opts) |
|
2662 | 2661 | if patch: |
|
2663 | 2662 | if patch not in q.series: |
|
2664 | 2663 | raise error.Abort(_(b"patch %s is not in series file") % patch) |
|
2665 | 2664 | start = q.series.index(patch) + 1 |
|
2666 | 2665 | else: |
|
2667 | 2666 | start = q.seriesend(True) |
|
2668 | 2667 | |
|
2669 | 2668 | if start == len(q.series) and opts.get(b'first'): |
|
2670 | 2669 | ui.write(_(b"all patches applied\n")) |
|
2671 | 2670 | return 1 |
|
2672 | 2671 | |
|
2673 | 2672 | if opts.get(b'first'): |
|
2674 | 2673 | length = 1 |
|
2675 | 2674 | else: |
|
2676 | 2675 | length = None |
|
2677 | 2676 | q.qseries( |
|
2678 | 2677 | repo, |
|
2679 | 2678 | start=start, |
|
2680 | 2679 | length=length, |
|
2681 | 2680 | status=b'U', |
|
2682 | 2681 | summary=opts.get(b'summary'), |
|
2683 | 2682 | ) |
|
2684 | 2683 | |
|
2685 | 2684 | |
|
2686 | 2685 | @command( |
|
2687 | 2686 | b"qimport", |
|
2688 | 2687 | [ |
|
2689 | 2688 | (b'e', b'existing', None, _(b'import file in patch directory')), |
|
2690 | 2689 | (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')), |
|
2691 | 2690 | (b'f', b'force', None, _(b'overwrite existing files')), |
|
2692 | 2691 | ( |
|
2693 | 2692 | b'r', |
|
2694 | 2693 | b'rev', |
|
2695 | 2694 | [], |
|
2696 | 2695 | _(b'place existing revisions under mq control'), |
|
2697 | 2696 | _(b'REV'), |
|
2698 | 2697 | ), |
|
2699 | 2698 | (b'g', b'git', None, _(b'use git extended diff format')), |
|
2700 | 2699 | (b'P', b'push', None, _(b'qpush after importing')), |
|
2701 | 2700 | ], |
|
2702 | 2701 | _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'), |
|
2703 | 2702 | helpcategory=command.CATEGORY_IMPORT_EXPORT, |
|
2704 | 2703 | ) |
|
2705 | 2704 | def qimport(ui, repo, *filename, **opts): |
|
2706 | 2705 | """import a patch or existing changeset |
|
2707 | 2706 | |
|
2708 | 2707 | The patch is inserted into the series after the last applied |
|
2709 | 2708 | patch. If no patches have been applied, qimport prepends the patch |
|
2710 | 2709 | to the series. |
|
2711 | 2710 | |
|
2712 | 2711 | The patch will have the same name as its source file unless you |
|
2713 | 2712 | give it a new one with -n/--name. |
|
2714 | 2713 | |
|
2715 | 2714 | You can register an existing patch inside the patch directory with |
|
2716 | 2715 | the -e/--existing flag. |
|
2717 | 2716 | |
|
2718 | 2717 | With -f/--force, an existing patch of the same name will be |
|
2719 | 2718 | overwritten. |
|
2720 | 2719 | |
|
2721 | 2720 | An existing changeset may be placed under mq control with -r/--rev |
|
2722 | 2721 | (e.g. qimport --rev . -n patch will place the current revision |
|
2723 | 2722 | under mq control). With -g/--git, patches imported with --rev will |
|
2724 | 2723 | use the git diff format. See the diffs help topic for information |
|
2725 | 2724 | on why this is important for preserving rename/copy information |
|
2726 | 2725 | and permission changes. Use :hg:`qfinish` to remove changesets |
|
2727 | 2726 | from mq control. |
|
2728 | 2727 | |
|
2729 | 2728 | To import a patch from standard input, pass - as the patch file. |
|
2730 | 2729 | When importing from standard input, a patch name must be specified |
|
2731 | 2730 | using the --name flag. |
|
2732 | 2731 | |
|
2733 | 2732 | To import an existing patch while renaming it:: |
|
2734 | 2733 | |
|
2735 | 2734 | hg qimport -e existing-patch -n new-name |
|
2736 | 2735 | |
|
2737 | 2736 | Returns 0 if import succeeded. |
|
2738 | 2737 | """ |
|
2739 | 2738 | opts = pycompat.byteskwargs(opts) |
|
2740 | 2739 | with repo.lock(): # because this may move phase
|
2741 | 2740 | q = repo.mq |
|
2742 | 2741 | try: |
|
2743 | 2742 | imported = q.qimport( |
|
2744 | 2743 | repo, |
|
2745 | 2744 | filename, |
|
2746 | 2745 | patchname=opts.get(b'name'), |
|
2747 | 2746 | existing=opts.get(b'existing'), |
|
2748 | 2747 | force=opts.get(b'force'), |
|
2749 | 2748 | rev=opts.get(b'rev'), |
|
2750 | 2749 | git=opts.get(b'git'), |
|
2751 | 2750 | ) |
|
2752 | 2751 | finally: |
|
2753 | 2752 | q.savedirty() |
|
2754 | 2753 | |
|
2755 | 2754 | if imported and opts.get(b'push') and not opts.get(b'rev'): |
|
2756 | 2755 | return q.push(repo, imported[-1]) |
|
2757 | 2756 | return 0 |
|
2758 | 2757 | |
|
2759 | 2758 | |
|
2760 | 2759 | def qinit(ui, repo, create): |
|
2761 | 2760 | """initialize a new queue repository |
|
2762 | 2761 | |
|
2763 | 2762 | This command also creates a series file for ordering patches, and |
|
2764 | 2763 | an mq-specific .hgignore file in the queue repository, to exclude |
|
2765 | 2764 | the status and guards files (these contain mostly transient state). |
|
2766 | 2765 | |
|
2767 | 2766 | Returns 0 if initialization succeeded.""" |
|
2768 | 2767 | q = repo.mq |
|
2769 | 2768 | r = q.init(repo, create) |
|
2770 | 2769 | q.savedirty() |
|
2771 | 2770 | if r: |
|
2772 | 2771 | if not os.path.exists(r.wjoin(b'.hgignore')): |
|
2773 | 2772 | fp = r.wvfs(b'.hgignore', b'w') |
|
2774 | 2773 | fp.write(b'^\\.hg\n') |
|
2775 | 2774 | fp.write(b'^\\.mq\n') |
|
2776 | 2775 | fp.write(b'syntax: glob\n') |
|
2777 | 2776 | fp.write(b'status\n') |
|
2778 | 2777 | fp.write(b'guards\n') |
|
2779 | 2778 | fp.close() |
|
2780 | 2779 | if not os.path.exists(r.wjoin(b'series')): |
|
2781 | 2780 | r.wvfs(b'series', b'w').close() |
|
2782 | 2781 | r[None].add([b'.hgignore', b'series']) |
|
2783 | 2782 | commands.add(ui, r) |
|
2784 | 2783 | return 0 |
|
2785 | 2784 | |
|
2786 | 2785 | |
|
2787 | 2786 | @command( |
|
2788 | 2787 | b"qinit", |
|
2789 | 2788 | [(b'c', b'create-repo', None, _(b'create queue repository'))], |
|
2790 | 2789 | _(b'hg qinit [-c]'), |
|
2791 | 2790 | helpcategory=command.CATEGORY_REPO_CREATION, |
|
2792 | 2791 | helpbasic=True, |
|
2793 | 2792 | ) |
|
2794 | 2793 | def init(ui, repo, **opts): |
|
2795 | 2794 | """init a new queue repository (DEPRECATED) |
|
2796 | 2795 | |
|
2797 | 2796 | The queue repository is unversioned by default. If |
|
2798 | 2797 | -c/--create-repo is specified, qinit will create a separate nested |
|
2799 | 2798 | repository for patches (qinit -c may also be run later to convert |
|
2800 | 2799 | an unversioned patch repository into a versioned one). You can use |
|
2801 | 2800 | qcommit to commit changes to this queue repository. |
|
2802 | 2801 | |
|
2803 | 2802 | This command is deprecated. Without -c, it's implied by other relevant |
|
2804 | 2803 | commands. With -c, use :hg:`init --mq` instead.""" |
|
2805 | 2804 | return qinit(ui, repo, create=opts.get('create_repo')) |
|
2806 | 2805 | |
|
2807 | 2806 | |
|
2808 | 2807 | @command( |
|
2809 | 2808 | b"qclone", |
|
2810 | 2809 | [ |
|
2811 | 2810 | (b'', b'pull', None, _(b'use pull protocol to copy metadata')), |
|
2812 | 2811 | ( |
|
2813 | 2812 | b'U', |
|
2814 | 2813 | b'noupdate', |
|
2815 | 2814 | None, |
|
2816 | 2815 | _(b'do not update the new working directories'), |
|
2817 | 2816 | ), |
|
2818 | 2817 | ( |
|
2819 | 2818 | b'', |
|
2820 | 2819 | b'uncompressed', |
|
2821 | 2820 | None, |
|
2822 | 2821 | _(b'use uncompressed transfer (fast over LAN)'), |
|
2823 | 2822 | ), |
|
2824 | 2823 | ( |
|
2825 | 2824 | b'p', |
|
2826 | 2825 | b'patches', |
|
2827 | 2826 | b'', |
|
2828 | 2827 | _(b'location of source patch repository'), |
|
2829 | 2828 | _(b'REPO'), |
|
2830 | 2829 | ), |
|
2831 | 2830 | ] |
|
2832 | 2831 | + cmdutil.remoteopts, |
|
2833 | 2832 | _(b'hg qclone [OPTION]... SOURCE [DEST]'), |
|
2834 | 2833 | helpcategory=command.CATEGORY_REPO_CREATION, |
|
2835 | 2834 | norepo=True, |
|
2836 | 2835 | ) |
|
2837 | 2836 | def clone(ui, source, dest=None, **opts): |
|
2838 | 2837 | """clone main and patch repository at same time |
|
2839 | 2838 | |
|
2840 | 2839 | If source is local, destination will have no patches applied. If

2841 | 2840 | source is remote, this command cannot check whether patches are

2842 | 2841 | applied in source, so it cannot guarantee that patches are not

2843 | 2842 | applied in destination. If you clone a remote repository, make

2844 | 2843 | sure it has no patches applied before cloning.
|
2845 | 2844 | |
|
2846 | 2845 | The source patch repository is looked for in <src>/.hg/patches by

2847 | 2846 | default. Use -p <url> to change it.
|
2848 | 2847 | |
|
2849 | 2848 | The patch directory must be a nested Mercurial repository, as |
|
2850 | 2849 | would be created by :hg:`init --mq`. |
|
2851 | 2850 | |
|
2852 | 2851 | Return 0 on success. |
|
2853 | 2852 | """ |
|
2854 | 2853 | opts = pycompat.byteskwargs(opts) |
|
2855 | 2854 | |
|
2856 | 2855 | def patchdir(repo): |
|
2857 | 2856 | """compute a patch repo url from a repo object""" |
|
2858 | 2857 | url = repo.url() |
|
2859 | 2858 | if url.endswith(b'/'): |
|
2860 | 2859 | url = url[:-1] |
|
2861 | 2860 | return url + b'/.hg/patches' |
|
2862 | 2861 | |
|
2863 | 2862 | # main repo (destination and sources) |
|
2864 | 2863 | if dest is None: |
|
2865 | 2864 | dest = hg.defaultdest(source) |
|
2866 | 2865 | __, source_path, __ = urlutil.get_clone_path(ui, source) |
|
2867 | 2866 | sr = hg.peer(ui, opts, source_path) |
|
2868 | 2867 | |
|
2869 | 2868 | # patches repo (source only) |
|
2870 | 2869 | if opts.get(b'patches'): |
|
2871 | 2870 | __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches')) |
|
2872 | 2871 | else: |
|
2873 | 2872 | patchespath = patchdir(sr) |
|
2874 | 2873 | try: |
|
2875 | 2874 | hg.peer(ui, opts, patchespath) |
|
2876 | 2875 | except error.RepoError: |
|
2877 | 2876 | raise error.Abort( |
|
2878 | 2877 | _(b'versioned patch repository not found (see init --mq)') |
|
2879 | 2878 | ) |
|
2880 | 2879 | qbase, destrev = None, None |
|
2881 | 2880 | if sr.local(): |
|
2882 | 2881 | repo = sr.local() |
|
2883 | 2882 | if repo.mq.applied and repo[qbase].phase() != phases.secret: |
|
2884 | 2883 | qbase = repo.mq.applied[0].node |
|
2885 | 2884 | if not hg.islocal(dest): |
|
2886 | 2885 | heads = set(repo.heads()) |
|
2887 | 2886 | destrev = list(heads.difference(repo.heads(qbase))) |
|
2888 | 2887 | destrev.append(repo.changelog.parents(qbase)[0]) |
|
2889 | 2888 | elif sr.capable(b'lookup'): |
|
2890 | 2889 | try: |
|
2891 | 2890 | qbase = sr.lookup(b'qbase') |
|
2892 | 2891 | except error.RepoError: |
|
2893 | 2892 | pass |
|
2894 | 2893 | |
|
2895 | 2894 | ui.note(_(b'cloning main repository\n')) |
|
2896 | 2895 | sr, dr = hg.clone( |
|
2897 | 2896 | ui, |
|
2898 | 2897 | opts, |
|
2899 | 2898 | sr.url(), |
|
2900 | 2899 | dest, |
|
2901 | 2900 | pull=opts.get(b'pull'), |
|
2902 | 2901 | revs=destrev, |
|
2903 | 2902 | update=False, |
|
2904 | 2903 | stream=opts.get(b'uncompressed'), |
|
2905 | 2904 | ) |
|
2906 | 2905 | |
|
2907 | 2906 | ui.note(_(b'cloning patch repository\n')) |
|
2908 | 2907 | hg.clone( |
|
2909 | 2908 | ui, |
|
2910 | 2909 | opts, |
|
2911 | 2910 | opts.get(b'patches') or patchdir(sr), |
|
2912 | 2911 | patchdir(dr), |
|
2913 | 2912 | pull=opts.get(b'pull'), |
|
2914 | 2913 | update=not opts.get(b'noupdate'), |
|
2915 | 2914 | stream=opts.get(b'uncompressed'), |
|
2916 | 2915 | ) |
|
2917 | 2916 | |
|
2918 | 2917 | if dr.local(): |
|
2919 | 2918 | repo = dr.local() |
|
2920 | 2919 | if qbase: |
|
2921 | 2920 | ui.note( |
|
2922 | 2921 | _( |
|
2923 | 2922 | b'stripping applied patches from destination ' |
|
2924 | 2923 | b'repository\n' |
|
2925 | 2924 | ) |
|
2926 | 2925 | ) |
|
2927 | 2926 | strip(ui, repo, [qbase], update=False, backup=None) |
|
2928 | 2927 | if not opts.get(b'noupdate'): |
|
2929 | 2928 | ui.note(_(b'updating destination repository\n')) |
|
2930 | 2929 | hg.update(repo, repo.changelog.tip()) |
|
2931 | 2930 | |
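The nested patchdir() helper in clone() above just derives the default patch-repo location from a main repository URL. A trivial sketch (rstrip collapses any number of trailing slashes, where the original removes at most one)::

    def patchdir(url):
        """Default patch repository location for a repo URL."""
        return url.rstrip('/') + '/.hg/patches'

    print(patchdir('https://example.org/repo/'))
    # https://example.org/repo/.hg/patches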
|
2932 | 2931 | |
|
2933 | 2932 | @command( |
|
2934 | 2933 | b"qcommit|qci", |
|
2935 | 2934 | commands.table[b"commit|ci"][1], |
|
2936 | 2935 | _(b'hg qcommit [OPTION]... [FILE]...'), |
|
2937 | 2936 | helpcategory=command.CATEGORY_COMMITTING, |
|
2938 | 2937 | inferrepo=True, |
|
2939 | 2938 | ) |
|
2940 | 2939 | def commit(ui, repo, *pats, **opts): |
|
2941 | 2940 | """commit changes in the queue repository (DEPRECATED) |
|
2942 | 2941 | |
|
2943 | 2942 | This command is deprecated; use :hg:`commit --mq` instead.""" |
|
2944 | 2943 | q = repo.mq |
|
2945 | 2944 | r = q.qrepo() |
|
2946 | 2945 | if not r: |
|
2947 | 2946 | raise error.Abort(b'no queue repository') |
|
2948 | 2947 | commands.commit(r.ui, r, *pats, **opts) |
|
2949 | 2948 | |
|
2950 | 2949 | |
|
2951 | 2950 | @command( |
|
2952 | 2951 | b"qseries", |
|
2953 | 2952 | [ |
|
2954 | 2953 | (b'm', b'missing', None, _(b'print patches not in series')), |
|
2955 | 2954 | ] |
|
2956 | 2955 | + seriesopts, |
|
2957 | 2956 | _(b'hg qseries [-ms]'), |
|
2958 | 2957 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2959 | 2958 | ) |
|
2960 | 2959 | def series(ui, repo, **opts): |
|
2961 | 2960 | """print the entire series file |
|
2962 | 2961 | |
|
2963 | 2962 | Returns 0 on success.""" |
|
2964 | 2963 | repo.mq.qseries( |
|
2965 | 2964 | repo, missing=opts.get('missing'), summary=opts.get('summary') |
|
2966 | 2965 | ) |
|
2967 | 2966 | return 0 |
|
2968 | 2967 | |
|
2969 | 2968 | |
|
2970 | 2969 | @command( |
|
2971 | 2970 | b"qtop", |
|
2972 | 2971 | seriesopts, |
|
2973 | 2972 | _(b'hg qtop [-s]'), |
|
2974 | 2973 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
2975 | 2974 | ) |
|
2976 | 2975 | def top(ui, repo, **opts): |
|
2977 | 2976 | """print the name of the current patch |
|
2978 | 2977 | |
|
2979 | 2978 | Returns 0 on success.""" |
|
2980 | 2979 | q = repo.mq |
|
2981 | 2980 | if q.applied: |
|
2982 | 2981 | t = q.seriesend(True) |
|
2983 | 2982 | else: |
|
2984 | 2983 | t = 0 |
|
2985 | 2984 | |
|
2986 | 2985 | if t: |
|
2987 | 2986 | q.qseries( |
|
2988 | 2987 | repo, |
|
2989 | 2988 | start=t - 1, |
|
2990 | 2989 | length=1, |
|
2991 | 2990 | status=b'A', |
|
2992 | 2991 | summary=opts.get('summary'), |
|
2993 | 2992 | ) |
|
2994 | 2993 | else: |
|
2995 | 2994 | ui.write(_(b"no patches applied\n")) |
|
2996 | 2995 | return 1 |
|
2997 | 2996 | |
|
2998 | 2997 | |
|
2999 | 2998 | @command( |
|
3000 | 2999 | b"qnext", |
|
3001 | 3000 | seriesopts, |
|
3002 | 3001 | _(b'hg qnext [-s]'), |
|
3003 | 3002 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3004 | 3003 | ) |
|
3005 | 3004 | def next(ui, repo, **opts): |
|
3006 | 3005 | """print the name of the next pushable patch |
|
3007 | 3006 | |
|
3008 | 3007 | Returns 0 on success.""" |
|
3009 | 3008 | q = repo.mq |
|
3010 | 3009 | end = q.seriesend() |
|
3011 | 3010 | if end == len(q.series): |
|
3012 | 3011 | ui.write(_(b"all patches applied\n")) |
|
3013 | 3012 | return 1 |
|
3014 | 3013 | q.qseries(repo, start=end, length=1, summary=opts.get('summary')) |
|
3015 | 3014 | |
|
3016 | 3015 | |
|
3017 | 3016 | @command( |
|
3018 | 3017 | b"qprev", |
|
3019 | 3018 | seriesopts, |
|
3020 | 3019 | _(b'hg qprev [-s]'), |
|
3021 | 3020 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3022 | 3021 | ) |
|
3023 | 3022 | def prev(ui, repo, **opts): |
|
3024 | 3023 | """print the name of the preceding applied patch |
|
3025 | 3024 | |
|
3026 | 3025 | Returns 0 on success.""" |
|
3027 | 3026 | q = repo.mq |
|
3028 | 3027 | l = len(q.applied) |
|
3029 | 3028 | if l == 1: |
|
3030 | 3029 | ui.write(_(b"only one patch applied\n")) |
|
3031 | 3030 | return 1 |
|
3032 | 3031 | if not l: |
|
3033 | 3032 | ui.write(_(b"no patches applied\n")) |
|
3034 | 3033 | return 1 |
|
3035 | 3034 | idx = q.series.index(q.applied[-2].name) |
|
3036 | 3035 | q.qseries( |
|
3037 | 3036 | repo, start=idx, length=1, status=b'A', summary=opts.get('summary') |
|
3038 | 3037 | ) |
|
3039 | 3038 | |
|
3040 | 3039 | |
|
3041 | 3040 | def setupheaderopts(ui, opts): |
|
3042 | 3041 | if not opts.get(b'user') and opts.get(b'currentuser'): |
|
3043 | 3042 | opts[b'user'] = ui.username() |
|
3044 | 3043 | if not opts.get(b'date') and opts.get(b'currentdate'): |
|
3045 | 3044 | opts[b'date'] = b"%d %d" % dateutil.makedate() |
|
3046 | 3045 | |
|
3047 | 3046 | |
|
3048 | 3047 | @command( |
|
3049 | 3048 | b"qnew", |
|
3050 | 3049 | [ |
|
3051 | 3050 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), |
|
3052 | 3051 | (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')), |
|
3053 | 3052 | (b'g', b'git', None, _(b'use git extended diff format')), |
|
3054 | 3053 | (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')), |
|
3055 | 3054 | (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')), |
|
3056 | 3055 | (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')), |
|
3057 | 3056 | (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')), |
|
3058 | 3057 | ] |
|
3059 | 3058 | + cmdutil.walkopts |
|
3060 | 3059 | + cmdutil.commitopts, |
|
3061 | 3060 | _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'), |
|
3062 | 3061 | helpcategory=command.CATEGORY_COMMITTING, |
|
3063 | 3062 | helpbasic=True, |
|
3064 | 3063 | inferrepo=True, |
|
3065 | 3064 | ) |
|
3066 | 3065 | def new(ui, repo, patch, *args, **opts): |
|
3067 | 3066 | """create a new patch |
|
3068 | 3067 | |
|
3069 | 3068 | qnew creates a new patch on top of the currently-applied patch (if |
|
3070 | 3069 | any). The patch will be initialized with any outstanding changes |
|
3071 | 3070 | in the working directory. You may also use -I/--include, |
|
3072 | 3071 | -X/--exclude, and/or a list of files after the patch name to add |
|
3073 | 3072 | only changes to matching files to the new patch, leaving the rest |
|
3074 | 3073 | as uncommitted modifications. |
|
3075 | 3074 | |
|
3076 | 3075 | -u/--user and -d/--date can be used to set the (given) user and |
|
3077 | 3076 | date, respectively. -U/--currentuser and -D/--currentdate set user |
|
3078 | 3077 | to current user and date to current date. |
|
3079 | 3078 | |
|
3080 | 3079 | -e/--edit, -m/--message or -l/--logfile set the patch header as |
|
3081 | 3080 | well as the commit message. If none is specified, the header is |
|
3082 | 3081 | empty and the commit message is '[mq]: PATCH'. |
|
3083 | 3082 | |
|
3084 | 3083 | Use the -g/--git option to keep the patch in the git extended diff |
|
3085 | 3084 | format. Read the diffs help topic for more information on why this |
|
3086 | 3085 | is important for preserving permission changes and copy/rename |
|
3087 | 3086 | information. |
|
3088 | 3087 | |
|
3089 | 3088 | Returns 0 on successful creation of a new patch. |
|
3090 | 3089 | """ |
|
3091 | 3090 | opts = pycompat.byteskwargs(opts) |
|
3092 | 3091 | msg = cmdutil.logmessage(ui, opts) |
|
3093 | 3092 | q = repo.mq |
|
3094 | 3093 | opts[b'msg'] = msg |
|
3095 | 3094 | setupheaderopts(ui, opts) |
|
3096 | 3095 | q.new(repo, patch, *args, **pycompat.strkwargs(opts)) |
|
3097 | 3096 | q.savedirty() |
|
3098 | 3097 | return 0 |
|
3099 | 3098 | |
|
3100 | 3099 | |
|
3101 | 3100 | @command( |
|
3102 | 3101 | b"qrefresh", |
|
3103 | 3102 | [ |
|
3104 | 3103 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), |
|
3105 | 3104 | (b'g', b'git', None, _(b'use git extended diff format')), |
|
3106 | 3105 | ( |
|
3107 | 3106 | b's', |
|
3108 | 3107 | b'short', |
|
3109 | 3108 | None, |
|
3110 | 3109 | _(b'refresh only files already in the patch and specified files'), |
|
3111 | 3110 | ), |
|
3112 | 3111 | ( |
|
3113 | 3112 | b'U', |
|
3114 | 3113 | b'currentuser', |
|
3115 | 3114 | None, |
|
3116 | 3115 | _(b'add/update author field in patch with current user'), |
|
3117 | 3116 | ), |
|
3118 | 3117 | ( |
|
3119 | 3118 | b'u', |
|
3120 | 3119 | b'user', |
|
3121 | 3120 | b'', |
|
3122 | 3121 | _(b'add/update author field in patch with given user'), |
|
3123 | 3122 | _(b'USER'), |
|
3124 | 3123 | ), |
|
3125 | 3124 | ( |
|
3126 | 3125 | b'D', |
|
3127 | 3126 | b'currentdate', |
|
3128 | 3127 | None, |
|
3129 | 3128 | _(b'add/update date field in patch with current date'), |
|
3130 | 3129 | ), |
|
3131 | 3130 | ( |
|
3132 | 3131 | b'd', |
|
3133 | 3132 | b'date', |
|
3134 | 3133 | b'', |
|
3135 | 3134 | _(b'add/update date field in patch with given date'), |
|
3136 | 3135 | _(b'DATE'), |
|
3137 | 3136 | ), |
|
3138 | 3137 | ] |
|
3139 | 3138 | + cmdutil.walkopts |
|
3140 | 3139 | + cmdutil.commitopts, |
|
3141 | 3140 | _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'), |
|
3142 | 3141 | helpcategory=command.CATEGORY_COMMITTING, |
|
3143 | 3142 | helpbasic=True, |
|
3144 | 3143 | inferrepo=True, |
|
3145 | 3144 | ) |
|
3146 | 3145 | def refresh(ui, repo, *pats, **opts): |
|
3147 | 3146 | """update the current patch |
|
3148 | 3147 | |
|
3149 | 3148 | If any file patterns are provided, the refreshed patch will |
|
3150 | 3149 | contain only the modifications that match those patterns; the |
|
3151 | 3150 | remaining modifications will remain in the working directory. |
|
3152 | 3151 | |
|
3153 | 3152 | If -s/--short is specified, files currently included in the patch |
|
3154 | 3153 | will be refreshed just like matched files and remain in the patch. |
|
3155 | 3154 | |
|
3156 | 3155 | If -e/--edit is specified, Mercurial will start your configured editor for |
|
3157 | 3156 | you to enter a message. In case qrefresh fails, you will find a backup of |
|
3158 | 3157 | your message in ``.hg/last-message.txt``. |
|
3159 | 3158 | |
|
3160 | 3159 | hg add/remove/copy/rename work as usual, though you might want to |
|
3161 | 3160 | use git-style patches (-g/--git or [diff] git=1) to track copies |
|
3162 | 3161 | and renames. See the diffs help topic for more information on the |
|
3163 | 3162 | git diff format. |
|
3164 | 3163 | |
|
3165 | 3164 | Returns 0 on success. |
|
3166 | 3165 | """ |
|
3167 | 3166 | opts = pycompat.byteskwargs(opts) |
|
3168 | 3167 | q = repo.mq |
|
3169 | 3168 | message = cmdutil.logmessage(ui, opts) |
|
3170 | 3169 | setupheaderopts(ui, opts) |
|
3171 | 3170 | with repo.wlock(): |
|
3172 | 3171 | ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts)) |
|
3173 | 3172 | q.savedirty() |
|
3174 | 3173 | return ret |
|
3175 | 3174 | |
|
3176 | 3175 | |
|
3177 | 3176 | @command( |
|
3178 | 3177 | b"qdiff", |
|
3179 | 3178 | cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts, |
|
3180 | 3179 | _(b'hg qdiff [OPTION]... [FILE]...'), |
|
3181 | 3180 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
3182 | 3181 | helpbasic=True, |
|
3183 | 3182 | inferrepo=True, |
|
3184 | 3183 | ) |
|
3185 | 3184 | def diff(ui, repo, *pats, **opts): |
|
3186 | 3185 | """diff of the current patch and subsequent modifications |
|
3187 | 3186 | |
|
3188 | 3187 | Shows a diff which includes the current patch as well as any |
|
3189 | 3188 | changes which have been made in the working directory since the |
|
3190 | 3189 | last refresh (thus showing what the current patch would become |
|
3191 | 3190 | after a qrefresh). |
|
3192 | 3191 | |
|
3193 | 3192 | Use :hg:`diff` if you only want to see the changes made since the |
|
3194 | 3193 | last qrefresh, or :hg:`export qtip` if you want to see changes |
|
3195 | 3194 | made by the current patch without including changes made since the |
|
3196 | 3195 | qrefresh. |
|
3197 | 3196 | |
|
3198 | 3197 | Returns 0 on success. |
|
3199 | 3198 | """ |
|
3200 | 3199 | ui.pager(b'qdiff') |
|
3201 | 3200 | repo.mq.diff(repo, pats, pycompat.byteskwargs(opts)) |
|
3202 | 3201 | return 0 |
|
3203 | 3202 | |
|
3204 | 3203 | |
|
3205 | 3204 | @command( |
|
3206 | 3205 | b'qfold', |
|
3207 | 3206 | [ |
|
3208 | 3207 | (b'e', b'edit', None, _(b'invoke editor on commit messages')), |
|
3209 | 3208 | (b'k', b'keep', None, _(b'keep folded patch files')), |
|
3210 | 3209 | ] |
|
3211 | 3210 | + cmdutil.commitopts, |
|
3212 | 3211 | _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'), |
|
3213 | 3212 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, |
|
3214 | 3213 | ) |
|
3215 | 3214 | def fold(ui, repo, *files, **opts): |
|
3216 | 3215 | """fold the named patches into the current patch |
|
3217 | 3216 | |
|
3218 | 3217 | Patches must not yet be applied. Each patch will be successively |
|
3219 | 3218 | applied to the current patch in the order given. If all the |
|
3220 | 3219 | patches apply successfully, the current patch will be refreshed |
|
3221 | 3220 | with the new cumulative patch, and the folded patches will be |
|
3222 | 3221 | deleted. With -k/--keep, the folded patch files will not be |
|
3223 | 3222 | removed afterwards. |
|
3224 | 3223 | |
|
3225 | 3224 | The header for each folded patch will be concatenated with the |
|
3226 | 3225 | current patch header, separated by a line of ``* * *``. |
|
3227 | 3226 | |
|
3228 | 3227 | Returns 0 on success.""" |
|
3229 | 3228 | opts = pycompat.byteskwargs(opts) |
|
3230 | 3229 | q = repo.mq |
|
3231 | 3230 | if not files: |
|
3232 | 3231 | raise error.Abort(_(b'qfold requires at least one patch name')) |
|
3233 | 3232 | if not q.checktoppatch(repo)[0]: |
|
3234 | 3233 | raise error.Abort(_(b'no patches applied')) |
|
3235 | 3234 | q.checklocalchanges(repo) |
|
3236 | 3235 | |
|
3237 | 3236 | message = cmdutil.logmessage(ui, opts) |
|
3238 | 3237 | |
|
3239 | 3238 | parent = q.lookup(b'qtip') |
|
3240 | 3239 | patches = [] |
|
3241 | 3240 | messages = [] |
|
3242 | 3241 | for f in files: |
|
3243 | 3242 | p = q.lookup(f) |
|
3244 | 3243 | if p in patches or p == parent: |
|
3245 | 3244 | ui.warn(_(b'skipping already folded patch %s\n') % p) |
|
3246 | 3245 | if q.isapplied(p): |
|
3247 | 3246 | raise error.Abort( |
|
3248 | 3247 | _(b'qfold cannot fold already applied patch %s') % p |
|
3249 | 3248 | ) |
|
3250 | 3249 | patches.append(p) |
|
3251 | 3250 | |
|
3252 | 3251 | for p in patches: |
|
3253 | 3252 | if not message: |
|
3254 | 3253 | ph = patchheader(q.join(p), q.plainmode) |
|
3255 | 3254 | if ph.message: |
|
3256 | 3255 | messages.append(ph.message) |
|
3257 | 3256 | pf = q.join(p) |
|
3258 | 3257 | (patchsuccess, files, fuzz) = q.patch(repo, pf) |
|
3259 | 3258 | if not patchsuccess: |
|
3260 | 3259 | raise error.Abort(_(b'error folding patch %s') % p) |
|
3261 | 3260 | |
|
3262 | 3261 | if not message: |
|
3263 | 3262 | ph = patchheader(q.join(parent), q.plainmode) |
|
3264 | 3263 | message = ph.message |
|
3265 | 3264 | for msg in messages: |
|
3266 | 3265 | if msg: |
|
3267 | 3266 | if message: |
|
3268 | 3267 | message.append(b'* * *') |
|
3269 | 3268 | message.extend(msg) |
|
3270 | 3269 | message = b'\n'.join(message) |
|
3271 | 3270 | |
|
3272 | 3271 | diffopts = q.patchopts(q.diffopts(), *patches) |
|
3273 | 3272 | with repo.wlock(): |
|
3274 | 3273 | q.refresh( |
|
3275 | 3274 | repo, |
|
3276 | 3275 | msg=message, |
|
3277 | 3276 | git=diffopts.git, |
|
3278 | 3277 | edit=opts.get(b'edit'), |
|
3279 | 3278 | editform=b'mq.qfold', |
|
3280 | 3279 | ) |
|
3281 | 3280 | q.delete(repo, patches, opts) |
|
3282 | 3281 | q.savedirty() |
|
3283 | 3282 | |
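fold() above concatenates commit messages as lists of lines, inserting a ``* * *`` separator line between non-empty ones (in the real code the list is seeded with the current patch's header). The joining rule as a sketch::

    def fold_messages(messages):
        """Join per-patch message line-lists the way qfold does."""
        combined = []
        for msg in messages:
            if msg:
                if combined:
                    combined.append('* * *')
                combined.extend(msg)
        return '\n'.join(combined)

    print(fold_messages([['first patch', 'details'], ['second patch']]))
    # first patch
    # details
    # * * *
    # second patch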
|
3284 | 3283 | |
|
3285 | 3284 | @command( |
|
3286 | 3285 | b"qgoto", |
|
3287 | 3286 | [ |
|
3288 | 3287 | ( |
|
3289 | 3288 | b'', |
|
3290 | 3289 | b'keep-changes', |
|
3291 | 3290 | None, |
|
3292 | 3291 | _(b'tolerate non-conflicting local changes'), |
|
3293 | 3292 | ), |
|
3294 | 3293 | (b'f', b'force', None, _(b'overwrite any local changes')), |
|
3295 | 3294 | (b'', b'no-backup', None, _(b'do not save backup copies of files')), |
|
3296 | 3295 | ], |
|
3297 | 3296 | _(b'hg qgoto [OPTION]... PATCH'), |
|
3298 | 3297 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3299 | 3298 | ) |
|
3300 | 3299 | def goto(ui, repo, patch, **opts): |
|
3301 | 3300 | """push or pop patches until named patch is at top of stack |
|
3302 | 3301 | |
|
3303 | 3302 | Returns 0 on success.""" |
|
3304 | 3303 | opts = pycompat.byteskwargs(opts) |
|
3305 | 3304 | opts = fixkeepchangesopts(ui, opts) |
|
3306 | 3305 | q = repo.mq |
|
3307 | 3306 | patch = q.lookup(patch) |
|
3308 | 3307 | nobackup = opts.get(b'no_backup') |
|
3309 | 3308 | keepchanges = opts.get(b'keep_changes') |
|
3310 | 3309 | if q.isapplied(patch): |
|
3311 | 3310 | ret = q.pop( |
|
3312 | 3311 | repo, |
|
3313 | 3312 | patch, |
|
3314 | 3313 | force=opts.get(b'force'), |
|
3315 | 3314 | nobackup=nobackup, |
|
3316 | 3315 | keepchanges=keepchanges, |
|
3317 | 3316 | ) |
|
3318 | 3317 | else: |
|
3319 | 3318 | ret = q.push( |
|
3320 | 3319 | repo, |
|
3321 | 3320 | patch, |
|
3322 | 3321 | force=opts.get(b'force'), |
|
3323 | 3322 | nobackup=nobackup, |
|
3324 | 3323 | keepchanges=keepchanges, |
|
3325 | 3324 | ) |
|
3326 | 3325 | q.savedirty() |
|
3327 | 3326 | return ret |
|
3328 | 3327 | |
|
3329 | 3328 | |
|
3330 | 3329 | @command( |
|
3331 | 3330 | b"qguard", |
|
3332 | 3331 | [ |
|
3333 | 3332 | (b'l', b'list', None, _(b'list all patches and guards')), |
|
3334 | 3333 | (b'n', b'none', None, _(b'drop all guards')), |
|
3335 | 3334 | ], |
|
3336 | 3335 | _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'), |
|
3337 | 3336 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3338 | 3337 | ) |
|
3339 | 3338 | def guard(ui, repo, *args, **opts): |
|
3340 | 3339 | """set or print guards for a patch |
|
3341 | 3340 | |
|
3342 | 3341 | Guards control whether a patch can be pushed. A patch with no |
|
3343 | 3342 | guards is always pushed. A patch with a positive guard ("+foo") is |
|
3344 | 3343 | pushed only if the :hg:`qselect` command has activated it. A patch with |
|
3345 | 3344 | a negative guard ("-foo") is never pushed if the :hg:`qselect` command |
|
3346 | 3345 | has activated it. |
|
3347 | 3346 | |
|
3348 | 3347 | With no arguments, print the currently active guards. |
|
3349 | 3348 | With arguments, set guards for the named patch. |
|
3350 | 3349 | |
|
3351 | 3350 | .. note:: |
|
3352 | 3351 | |
|
3353 | 3352 | Specifying negative guards now requires '--'. |
|
3354 | 3353 | |
|
3355 | 3354 | To set guards on another patch:: |
|
3356 | 3355 | |
|
3357 | 3356 | hg qguard other.patch -- +2.6.17 -stable |
|
3358 | 3357 | |
|
3359 | 3358 | Returns 0 on success. |
|
3360 | 3359 | """ |
|
3361 | 3360 | |
|
3362 | 3361 | def status(idx): |
|
3363 | 3362 | guards = q.seriesguards[idx] or [b'unguarded'] |
|
3364 | 3363 | if q.series[idx] in applied: |
|
3365 | 3364 | state = b'applied' |
|
3366 | 3365 | elif q.pushable(idx)[0]: |
|
3367 | 3366 | state = b'unapplied' |
|
3368 | 3367 | else: |
|
3369 | 3368 | state = b'guarded' |
|
3370 | 3369 | label = b'qguard.patch qguard.%s qseries.%s' % (state, state) |
|
3371 | 3370 | ui.write(b'%s: ' % ui.label(q.series[idx], label)) |
|
3372 | 3371 | |
|
3373 | 3372 | for i, guard in enumerate(guards): |
|
3374 | 3373 | if guard.startswith(b'+'): |
|
3375 | 3374 | ui.write(guard, label=b'qguard.positive') |
|
3376 | 3375 | elif guard.startswith(b'-'): |
|
3377 | 3376 | ui.write(guard, label=b'qguard.negative') |
|
3378 | 3377 | else: |
|
3379 | 3378 | ui.write(guard, label=b'qguard.unguarded') |
|
3380 | 3379 | if i != len(guards) - 1: |
|
3381 | 3380 | ui.write(b' ') |
|
3382 | 3381 | ui.write(b'\n') |
|
3383 | 3382 | |
|
3384 | 3383 | q = repo.mq |
|
3385 | 3384 | applied = {p.name for p in q.applied} |
|
3386 | 3385 | patch = None |
|
3387 | 3386 | args = list(args) |
|
3388 | 3387 | if opts.get('list'): |
|
3389 | 3388 | if args or opts.get('none'): |
|
3390 | 3389 | raise error.Abort( |
|
3391 | 3390 | _(b'cannot mix -l/--list with options or arguments') |
|
3392 | 3391 | ) |
|
3393 | 3392 | for i in pycompat.xrange(len(q.series)): |
|
3394 | 3393 | status(i) |
|
3395 | 3394 | return |
|
3396 | 3395 | if not args or args[0][0:1] in b'-+': |
|
3397 | 3396 | if not q.applied: |
|
3398 | 3397 | raise error.Abort(_(b'no patches applied')) |
|
3399 | 3398 | patch = q.applied[-1].name |
|
3400 | 3399 | if patch is None and args[0][0:1] not in b'-+': |
|
3401 | 3400 | patch = args.pop(0) |
|
3402 | 3401 | if patch is None: |
|
3403 | 3402 | raise error.Abort(_(b'no patch to work with')) |
|
3404 | 3403 | if args or opts.get('none'): |
|
3405 | 3404 | idx = q.findseries(patch) |
|
3406 | 3405 | if idx is None: |
|
3407 | 3406 | raise error.Abort(_(b'no patch named %s') % patch) |
|
3408 | 3407 | q.setguards(idx, args) |
|
3409 | 3408 | q.savedirty() |
|
3410 | 3409 | else: |
|
3411 | 3410 | status(q.series.index(q.lookup(patch))) |
|
3412 | 3411 | |
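The status() helper in guard() above classifies entries as applied, unapplied, or guarded via queue.pushable(). The guard rule itself, as documented for qselect further below, is: a patch with no guards is pushable; any negative guard matching an active guard blocks it; and if it has positive guards, at least one must match an active guard. A standalone sketch of that predicate (an approximation of queue.pushable(), not the code itself)::

    def pushable(guards, active):
        """Guard semantics sketch: guards look like '+stable'/'-stable'."""
        if not guards:
            return True
        active = set(active)
        if any(g[1:] in active for g in guards if g.startswith('-')):
            return False
        positives = [g for g in guards if g.startswith('+')]
        if positives:
            return any(g[1:] in active for g in positives)
        return True

    print(pushable(['-stable'], ['stable']))  # False: negative guard matches
    print(pushable(['+stable'], ['stable']))  # True: positive guard matches
    print(pushable(['+stable'], []))          # False: unmatched positive guard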
|
3413 | 3412 | |
|
3414 | 3413 | @command( |
|
3415 | 3414 | b"qheader", |
|
3416 | 3415 | [], |
|
3417 | 3416 | _(b'hg qheader [PATCH]'), |
|
3418 | 3417 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3419 | 3418 | ) |
|
3420 | 3419 | def header(ui, repo, patch=None): |
|
3421 | 3420 | """print the header of the topmost or specified patch |
|
3422 | 3421 | |
|
3423 | 3422 | Returns 0 on success.""" |
|
3424 | 3423 | q = repo.mq |
|
3425 | 3424 | |
|
3426 | 3425 | if patch: |
|
3427 | 3426 | patch = q.lookup(patch) |
|
3428 | 3427 | else: |
|
3429 | 3428 | if not q.applied: |
|
3430 | 3429 | ui.write(_(b'no patches applied\n')) |
|
3431 | 3430 | return 1 |
|
3432 | 3431 | patch = q.lookup(b'qtip') |
|
3433 | 3432 | ph = patchheader(q.join(patch), q.plainmode) |
|
3434 | 3433 | |
|
3435 | 3434 | ui.write(b'\n'.join(ph.message) + b'\n') |
|
3436 | 3435 | |
|
3437 | 3436 | |
|
3438 | 3437 | def lastsavename(path): |
|
3439 | 3438 | (directory, base) = os.path.split(path) |
|
3440 | 3439 | names = os.listdir(directory) |
|
3441 | 3440 | namere = re.compile(b"%s.([0-9]+)" % base) |
|
3442 | 3441 | maxindex = None |
|
3443 | 3442 | maxname = None |
|
3444 | 3443 | for f in names: |
|
3445 | 3444 | m = namere.match(f) |
|
3446 | 3445 | if m: |
|
3447 | 3446 | index = int(m.group(1)) |
|
3448 | 3447 | if maxindex is None or index > maxindex: |
|
3449 | 3448 | maxindex = index |
|
3450 | 3449 | maxname = f |
|
3451 | 3450 | if maxname: |
|
3452 | 3451 | return (os.path.join(directory, maxname), maxindex) |
|
3453 | 3452 | return (None, None) |
|
3454 | 3453 | |
|
3455 | 3454 | |
|
3456 | 3455 | def savename(path): |
|
3457 | 3456 | (last, index) = lastsavename(path) |
|
3458 | 3457 | if last is None: |
|
3459 | 3458 | index = 0 |
|
3460 | 3459 | newpath = path + b".%d" % (index + 1) |
|
3461 | 3460 | return newpath |
|
3462 | 3461 | |
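lastsavename() and savename() above implement a numbered-backup naming scheme: find the highest existing ``<base>.<N>`` in the directory and use ``<base>.<N+1>`` for the next copy. A self-contained sketch (the original's regex leaves the dot unescaped and the suffix unanchored; both are tightened here)::

    import os
    import re
    import tempfile

    def next_savename(path):
        """Next backup name in the <path>.<N> series, like savename()."""
        directory, base = os.path.split(path)
        pattern = re.compile(r'%s\.([0-9]+)$' % re.escape(base))
        indexes = [int(m.group(1))
                   for m in (pattern.match(f) for f in os.listdir(directory))
                   if m]
        return '%s.%d' % (path, max(indexes, default=0) + 1)

    d = tempfile.mkdtemp()
    open(os.path.join(d, 'patches.1'), 'w').close()
    open(os.path.join(d, 'patches.3'), 'w').close()
    print(next_savename(os.path.join(d, 'patches')))  # ends in patches.4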
|
3463 | 3462 | |
|
3464 | 3463 | @command( |
|
3465 | 3464 | b"qpush", |
|
3466 | 3465 | [ |
|
3467 | 3466 | ( |
|
3468 | 3467 | b'', |
|
3469 | 3468 | b'keep-changes', |
|
3470 | 3469 | None, |
|
3471 | 3470 | _(b'tolerate non-conflicting local changes'), |
|
3472 | 3471 | ), |
|
3473 | 3472 | (b'f', b'force', None, _(b'apply on top of local changes')), |
|
3474 | 3473 | ( |
|
3475 | 3474 | b'e', |
|
3476 | 3475 | b'exact', |
|
3477 | 3476 | None, |
|
3478 | 3477 | _(b'apply the target patch to its recorded parent'), |
|
3479 | 3478 | ), |
|
3480 | 3479 | (b'l', b'list', None, _(b'list patch name in commit text')), |
|
3481 | 3480 | (b'a', b'all', None, _(b'apply all patches')), |
|
3482 | 3481 | (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')), |
|
3483 | 3482 | (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')), |
|
3484 | 3483 | ( |
|
3485 | 3484 | b'', |
|
3486 | 3485 | b'move', |
|
3487 | 3486 | None, |
|
3488 | 3487 | _(b'reorder patch series and apply only the patch'), |
|
3489 | 3488 | ), |
|
3490 | 3489 | (b'', b'no-backup', None, _(b'do not save backup copies of files')), |
|
3491 | 3490 | ], |
|
3492 | 3491 | _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'), |
|
3493 | 3492 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3494 | 3493 | helpbasic=True, |
|
3495 | 3494 | ) |
|
3496 | 3495 | def push(ui, repo, patch=None, **opts): |
|
3497 | 3496 | """push the next patch onto the stack |
|
3498 | 3497 | |
|
3499 | 3498 | By default, abort if the working directory contains uncommitted |
|
3500 | 3499 | changes. With --keep-changes, abort only if the uncommitted files |
|
3501 | 3500 | overlap with patched files. With -f/--force, backup and patch over |
|
3502 | 3501 | uncommitted changes. |
|
3503 | 3502 | |
|
3504 | 3503 | Return 0 on success. |
|
3505 | 3504 | """ |
|
3506 | 3505 | q = repo.mq |
|
3507 | 3506 | mergeq = None |
|
3508 | 3507 | |
|
3509 | 3508 | opts = pycompat.byteskwargs(opts) |
|
3510 | 3509 | opts = fixkeepchangesopts(ui, opts) |
|
3511 | 3510 | if opts.get(b'merge'): |
|
3512 | 3511 | if opts.get(b'name'): |
|
3513 | 3512 | newpath = repo.vfs.join(opts.get(b'name')) |
|
3514 | 3513 | else: |
|
3515 | 3514 | newpath, i = lastsavename(q.path) |
|
3516 | 3515 | if not newpath: |
|
3517 | 3516 | ui.warn(_(b"no saved queues found, please use -n\n")) |
|
3518 | 3517 | return 1 |
|
3519 | 3518 | mergeq = queue(ui, repo.baseui, repo.path, newpath) |
|
3520 | 3519 | ui.warn(_(b"merging with queue at: %s\n") % mergeq.path) |
|
3521 | 3520 | ret = q.push( |
|
3522 | 3521 | repo, |
|
3523 | 3522 | patch, |
|
3524 | 3523 | force=opts.get(b'force'), |
|
3525 | 3524 | list=opts.get(b'list'), |
|
3526 | 3525 | mergeq=mergeq, |
|
3527 | 3526 | all=opts.get(b'all'), |
|
3528 | 3527 | move=opts.get(b'move'), |
|
3529 | 3528 | exact=opts.get(b'exact'), |
|
3530 | 3529 | nobackup=opts.get(b'no_backup'), |
|
3531 | 3530 | keepchanges=opts.get(b'keep_changes'), |
|
3532 | 3531 | ) |
|
3533 | 3532 | return ret |
|
3534 | 3533 | |
|
3535 | 3534 | |
|
3536 | 3535 | @command( |
|
3537 | 3536 | b"qpop", |
|
3538 | 3537 | [ |
|
3539 | 3538 | (b'a', b'all', None, _(b'pop all patches')), |
|
3540 | 3539 | (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')), |
|
3541 | 3540 | ( |
|
3542 | 3541 | b'', |
|
3543 | 3542 | b'keep-changes', |
|
3544 | 3543 | None, |
|
3545 | 3544 | _(b'tolerate non-conflicting local changes'), |
|
3546 | 3545 | ), |
|
3547 | 3546 | (b'f', b'force', None, _(b'forget any local changes to patched files')), |
|
3548 | 3547 | (b'', b'no-backup', None, _(b'do not save backup copies of files')), |
|
3549 | 3548 | ], |
|
3550 | 3549 | _(b'hg qpop [-a] [-f] [PATCH | INDEX]'), |
|
3551 | 3550 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3552 | 3551 | helpbasic=True, |
|
3553 | 3552 | ) |
|
3554 | 3553 | def pop(ui, repo, patch=None, **opts): |
|
3555 | 3554 | """pop the current patch off the stack |
|
3556 | 3555 | |
|
3557 | 3556 | Without argument, pops off the top of the patch stack. If given a |
|
3558 | 3557 | patch name, keeps popping off patches until the named patch is at |
|
3559 | 3558 | the top of the stack. |
|
3560 | 3559 | |
|
3561 | 3560 | By default, abort if the working directory contains uncommitted |
|
3562 | 3561 | changes. With --keep-changes, abort only if the uncommitted files |
|
3563 | 3562 | overlap with patched files. With -f/--force, backup and discard |
|
3564 | 3563 | changes made to such files. |
|
3565 | 3564 | |
|
3566 | 3565 | Return 0 on success. |
|
3567 | 3566 | """ |
|
3568 | 3567 | opts = pycompat.byteskwargs(opts) |
|
3569 | 3568 | opts = fixkeepchangesopts(ui, opts) |
|
3570 | 3569 | localupdate = True |
|
3571 | 3570 | if opts.get(b'name'): |
|
3572 | 3571 | q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name'))) |
|
3573 | 3572 | ui.warn(_(b'using patch queue: %s\n') % q.path) |
|
3574 | 3573 | localupdate = False |
|
3575 | 3574 | else: |
|
3576 | 3575 | q = repo.mq |
|
3577 | 3576 | ret = q.pop( |
|
3578 | 3577 | repo, |
|
3579 | 3578 | patch, |
|
3580 | 3579 | force=opts.get(b'force'), |
|
3581 | 3580 | update=localupdate, |
|
3582 | 3581 | all=opts.get(b'all'), |
|
3583 | 3582 | nobackup=opts.get(b'no_backup'), |
|
3584 | 3583 | keepchanges=opts.get(b'keep_changes'), |
|
3585 | 3584 | ) |
|
3586 | 3585 | q.savedirty() |
|
3587 | 3586 | return ret |
|
3588 | 3587 | |
|
3589 | 3588 | |
|
3590 | 3589 | @command( |
|
3591 | 3590 | b"qrename|qmv", |
|
3592 | 3591 | [], |
|
3593 | 3592 | _(b'hg qrename PATCH1 [PATCH2]'), |
|
3594 | 3593 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3595 | 3594 | ) |
|
3596 | 3595 | def rename(ui, repo, patch, name=None, **opts): |
|
3597 | 3596 | """rename a patch |
|
3598 | 3597 | |
|
3599 | 3598 | With one argument, renames the current patch to PATCH1. |
|
3600 | 3599 | With two arguments, renames PATCH1 to PATCH2. |
|
3601 | 3600 | |
|
3602 | 3601 | Returns 0 on success.""" |
|
3603 | 3602 | q = repo.mq |
|
3604 | 3603 | if not name: |
|
3605 | 3604 | name = patch |
|
3606 | 3605 | patch = None |
|
3607 | 3606 | |
|
3608 | 3607 | if patch: |
|
3609 | 3608 | patch = q.lookup(patch) |
|
3610 | 3609 | else: |
|
3611 | 3610 | if not q.applied: |
|
3612 | 3611 | ui.write(_(b'no patches applied\n')) |
|
3613 | 3612 | return |
|
3614 | 3613 | patch = q.lookup(b'qtip') |
|
3615 | 3614 | absdest = q.join(name) |
|
3616 | 3615 | if os.path.isdir(absdest): |
|
3617 | 3616 | name = normname(os.path.join(name, os.path.basename(patch))) |
|
3618 | 3617 | absdest = q.join(name) |
|
3619 | 3618 | q.checkpatchname(name) |
|
3620 | 3619 | |
|
3621 | 3620 | ui.note(_(b'renaming %s to %s\n') % (patch, name)) |
|
3622 | 3621 | i = q.findseries(patch) |
|
3623 | 3622 | guards = q.guard_re.findall(q.fullseries[i]) |
|
3624 | 3623 | q.fullseries[i] = name + b''.join([b' #' + g for g in guards]) |
|
3625 | 3624 | q.parseseries() |
|
3626 | 3625 | q.seriesdirty = True |
|
3627 | 3626 | |
|
3628 | 3627 | info = q.isapplied(patch) |
|
3629 | 3628 | if info: |
|
3630 | 3629 | q.applied[info[0]] = statusentry(info[1], name) |
|
3631 | 3630 | q.applieddirty = True |
|
3632 | 3631 | |
|
3633 | 3632 | destdir = os.path.dirname(absdest) |
|
3634 | 3633 | if not os.path.isdir(destdir): |
|
3635 | 3634 | os.makedirs(destdir) |
|
3636 | 3635 | util.rename(q.join(patch), absdest) |
|
3637 | 3636 | r = q.qrepo() |
|
3638 | 3637 | if r and patch in r.dirstate: |
|
3639 | 3638 | wctx = r[None] |
|
3640 | 3639 | with r.wlock(): |
|
3641 | 3640 | if r.dirstate[patch] == b'a': |
|
3642 | 3641 | r.dirstate.drop(patch) |
|
3643 | 3642 | r.dirstate.add(name) |
|
3644 | 3643 | else: |
|
3645 | 3644 | wctx.copy(patch, name) |
|
3646 | 3645 | wctx.forget([patch]) |
|
3647 | 3646 | |
|
3648 | 3647 | q.savedirty() |
|
3649 | 3648 | |
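rename() above rewrites the series entry for a patch while preserving any guard annotations, which queue.guard_re extracts from the series line. A sketch with the guard pattern reproduced as an assumption (mirroring mq's ``#+guard`` / ``#-guard`` series syntax)::

    import re

    # Assumed stand-in for queue.guard_re: '#' followed by a signed word.
    GUARD_RE = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def rename_series_entry(entry, newname):
        """Rebuild a series line under a new name, keeping its guards."""
        guards = GUARD_RE.findall(entry)
        return newname + ''.join(' #' + g for g in guards)

    print(rename_series_entry('old.patch #+stable #-broken', 'new.patch'))
    # new.patch #+stable #-broken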
|
3650 | 3649 | |
|
3651 | 3650 | @command( |
|
3652 | 3651 | b"qrestore", |
|
3653 | 3652 | [ |
|
3654 | 3653 | (b'd', b'delete', None, _(b'delete save entry')), |
|
3655 | 3654 | (b'u', b'update', None, _(b'update queue working directory')), |
|
3656 | 3655 | ], |
|
3657 | 3656 | _(b'hg qrestore [-d] [-u] REV'), |
|
3658 | 3657 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3659 | 3658 | ) |
|
3660 | 3659 | def restore(ui, repo, rev, **opts): |
|
3661 | 3660 | """restore the queue state saved by a revision (DEPRECATED) |
|
3662 | 3661 | |
|
3663 | 3662 | This command is deprecated, use :hg:`rebase` instead.""" |
|
3664 | 3663 | rev = repo.lookup(rev) |
|
3665 | 3664 | q = repo.mq |
|
3666 | 3665 | q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update')) |
|
3667 | 3666 | q.savedirty() |
|
3668 | 3667 | return 0 |
|
3669 | 3668 | |
|
3670 | 3669 | |
|
3671 | 3670 | @command( |
|
3672 | 3671 | b"qsave", |
|
3673 | 3672 | [ |
|
3674 | 3673 | (b'c', b'copy', None, _(b'copy patch directory')), |
|
3675 | 3674 | (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')), |
|
3676 | 3675 | (b'e', b'empty', None, _(b'clear queue status file')), |
|
3677 | 3676 | (b'f', b'force', None, _(b'force copy')), |
|
3678 | 3677 | ] |
|
3679 | 3678 | + cmdutil.commitopts, |
|
3680 | 3679 | _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'), |
|
3681 | 3680 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3682 | 3681 | ) |
|
3683 | 3682 | def save(ui, repo, **opts): |
|
3684 | 3683 | """save current queue state (DEPRECATED) |
|
3685 | 3684 | |
|
3686 | 3685 | This command is deprecated, use :hg:`rebase` instead.""" |
|
3687 | 3686 | q = repo.mq |
|
3688 | 3687 | opts = pycompat.byteskwargs(opts) |
|
3689 | 3688 | message = cmdutil.logmessage(ui, opts) |
|
3690 | 3689 | ret = q.save(repo, msg=message) |
|
3691 | 3690 | if ret: |
|
3692 | 3691 | return ret |
|
3693 | 3692 | q.savedirty() # save to .hg/patches before copying |
|
3694 | 3693 | if opts.get(b'copy'): |
|
3695 | 3694 | path = q.path |
|
3696 | 3695 | if opts.get(b'name'): |
|
3697 | 3696 | newpath = os.path.join(q.basepath, opts.get(b'name')) |
|
3698 | 3697 | if os.path.exists(newpath): |
|
3699 | 3698 | if not os.path.isdir(newpath): |
|
3700 | 3699 | raise error.Abort( |
|
3701 | 3700 | _(b'destination %s exists and is not a directory') |
|
3702 | 3701 | % newpath |
|
3703 | 3702 | ) |
|
3704 | 3703 | if not opts.get(b'force'): |
|
3705 | 3704 | raise error.Abort( |
|
3706 | 3705 | _(b'destination %s exists, use -f to force') % newpath |
|
3707 | 3706 | ) |
|
3708 | 3707 | else: |
|
3709 | 3708 | newpath = savename(path) |
|
3710 | 3709 | ui.warn(_(b"copy %s to %s\n") % (path, newpath)) |
|
3711 | 3710 | util.copyfiles(path, newpath) |
|
3712 | 3711 | if opts.get(b'empty'): |
|
3713 | 3712 | del q.applied[:] |
|
3714 | 3713 | q.applieddirty = True |
|
3715 | 3714 | q.savedirty() |
|
3716 | 3715 | return 0 |
|
3717 | 3716 | |
|
3718 | 3717 | |
|
3719 | 3718 | @command( |
|
3720 | 3719 | b"qselect", |
|
3721 | 3720 | [ |
|
3722 | 3721 | (b'n', b'none', None, _(b'disable all guards')), |
|
3723 | 3722 | (b's', b'series', None, _(b'list all guards in series file')), |
|
3724 | 3723 | (b'', b'pop', None, _(b'pop to before first guarded applied patch')), |
|
3725 | 3724 | (b'', b'reapply', None, _(b'pop, then reapply patches')), |
|
3726 | 3725 | ], |
|
3727 | 3726 | _(b'hg qselect [OPTION]... [GUARD]...'), |
|
3728 | 3727 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3729 | 3728 | ) |
|
3730 | 3729 | def select(ui, repo, *args, **opts): |
|
3731 | 3730 | """set or print guarded patches to push |
|
3732 | 3731 | |
|
3733 | 3732 | Use the :hg:`qguard` command to set or print guards on a patch, then use
|
3734 | 3733 | qselect to tell mq which guards to use. A patch will be pushed if |
|
3735 | 3734 | it has no guards or any positive guards match the currently |
|
3736 | 3735 | selected guard, but will not be pushed if any negative guards |
|
3737 | 3736 | match the current guard. For example:: |
|
3738 | 3737 | |
|
3739 | 3738 | qguard foo.patch -- -stable (negative guard) |
|
3740 | 3739 | qguard bar.patch +stable (positive guard) |
|
3741 | 3740 | qselect stable |
|
3742 | 3741 | |
|
3743 | 3742 | This activates the "stable" guard. mq will skip foo.patch (because |
|
3744 | 3743 | it has a negative match) but push bar.patch (because it has a |
|
3745 | 3744 | positive match). |
|
3746 | 3745 | |
|
3747 | 3746 | With no arguments, prints the currently active guards. |
|
3748 | 3747 | With one argument, sets the active guard. |
|
3749 | 3748 | |
|
3750 | 3749 | Use -n/--none to deactivate guards (no other arguments needed). |
|
3751 | 3750 | When no guards are active, patches with positive guards are |
|
3752 | 3751 | skipped and patches with negative guards are pushed. |
|
3753 | 3752 | |
|
3754 | 3753 | qselect can change the guards on applied patches. It does not pop |
|
3755 | 3754 | guarded patches by default. Use --pop to pop back to the last |
|
3756 | 3755 | applied patch that is not guarded. Use --reapply (which implies |
|
3757 | 3756 | --pop) to push back to the current patch afterwards, but skip |
|
3758 | 3757 | guarded patches. |
|
3759 | 3758 | |
|
3760 | 3759 | Use -s/--series to print a list of all guards in the series file |
|
3761 | 3760 | (no other arguments needed). Use -v for more information. |
|
3762 | 3761 | |
|
3763 | 3762 | Returns 0 on success.""" |
|
3764 | 3763 | |
|
3765 | 3764 | q = repo.mq |
|
3766 | 3765 | opts = pycompat.byteskwargs(opts) |
|
3767 | 3766 | guards = q.active() |
|
3768 | 3767 | pushable = lambda i: q.pushable(q.applied[i].name)[0] |
|
3769 | 3768 | if args or opts.get(b'none'): |
|
3770 | 3769 | old_unapplied = q.unapplied(repo) |
|
3771 | 3770 | old_guarded = [ |
|
3772 | 3771 | i for i in pycompat.xrange(len(q.applied)) if not pushable(i) |
|
3773 | 3772 | ] |
|
3774 | 3773 | q.setactive(args) |
|
3775 | 3774 | q.savedirty() |
|
3776 | 3775 | if not args: |
|
3777 | 3776 | ui.status(_(b'guards deactivated\n')) |
|
3778 | 3777 | if not opts.get(b'pop') and not opts.get(b'reapply'): |
|
3779 | 3778 | unapplied = q.unapplied(repo) |
|
3780 | 3779 | guarded = [ |
|
3781 | 3780 | i for i in pycompat.xrange(len(q.applied)) if not pushable(i) |
|
3782 | 3781 | ] |
|
3783 | 3782 | if len(unapplied) != len(old_unapplied): |
|
3784 | 3783 | ui.status( |
|
3785 | 3784 | _( |
|
3786 | 3785 | b'number of unguarded, unapplied patches has ' |
|
3787 | 3786 | b'changed from %d to %d\n' |
|
3788 | 3787 | ) |
|
3789 | 3788 | % (len(old_unapplied), len(unapplied)) |
|
3790 | 3789 | ) |
|
3791 | 3790 | if len(guarded) != len(old_guarded): |
|
3792 | 3791 | ui.status( |
|
3793 | 3792 | _( |
|
3794 | 3793 | b'number of guarded, applied patches has changed ' |
|
3795 | 3794 | b'from %d to %d\n' |
|
3796 | 3795 | ) |
|
3797 | 3796 | % (len(old_guarded), len(guarded)) |
|
3798 | 3797 | ) |
|
3799 | 3798 | elif opts.get(b'series'): |
|
3800 | 3799 | guards = {} |
|
3801 | 3800 | noguards = 0 |
|
3802 | 3801 | for gs in q.seriesguards: |
|
3803 | 3802 | if not gs: |
|
3804 | 3803 | noguards += 1 |
|
3805 | 3804 | for g in gs: |
|
3806 | 3805 | guards.setdefault(g, 0) |
|
3807 | 3806 | guards[g] += 1 |
|
3808 | 3807 | if ui.verbose: |
|
3809 | 3808 | guards[b'NONE'] = noguards |
|
3810 | 3809 | guards = list(guards.items()) |
|
3811 | 3810 | guards.sort(key=lambda x: x[0][1:]) |
|
3812 | 3811 | if guards: |
|
3813 | 3812 | ui.note(_(b'guards in series file:\n')) |
|
3814 | 3813 | for guard, count in guards: |
|
3815 | 3814 | ui.note(b'%2d ' % count) |
|
3816 | 3815 | ui.write(guard, b'\n') |
|
3817 | 3816 | else: |
|
3818 | 3817 | ui.note(_(b'no guards in series file\n')) |
|
3819 | 3818 | else: |
|
3820 | 3819 | if guards: |
|
3821 | 3820 | ui.note(_(b'active guards:\n')) |
|
3822 | 3821 | for g in guards: |
|
3823 | 3822 | ui.write(g, b'\n') |
|
3824 | 3823 | else: |
|
3825 | 3824 | ui.write(_(b'no active guards\n')) |
|
3826 | 3825 | reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name |
|
3827 | 3826 | popped = False |
|
3828 | 3827 | if opts.get(b'pop') or opts.get(b'reapply'): |
|
3829 | 3828 | for i in pycompat.xrange(len(q.applied)): |
|
3830 | 3829 | if not pushable(i): |
|
3831 | 3830 | ui.status(_(b'popping guarded patches\n')) |
|
3832 | 3831 | popped = True |
|
3833 | 3832 | if i == 0: |
|
3834 | 3833 | q.pop(repo, all=True) |
|
3835 | 3834 | else: |
|
3836 | 3835 | q.pop(repo, q.applied[i - 1].name) |
|
3837 | 3836 | break |
|
3838 | 3837 | if popped: |
|
3839 | 3838 | try: |
|
3840 | 3839 | if reapply: |
|
3841 | 3840 | ui.status(_(b'reapplying unguarded patches\n')) |
|
3842 | 3841 | q.push(repo, reapply) |
|
3843 | 3842 | finally: |
|
3844 | 3843 | q.savedirty() |
|
3845 | 3844 | |
|
3846 | 3845 | |
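A rough model of the push rules the docstring above describes (a sketch, not mq's actual ``pushable()`` implementation)::

    def pushable(guards, active):
        # guards: e.g. [b'+stable', b'-wip']; active: set of active guard names
        if not guards:
            return True
        negatives = {g[1:] for g in guards if g.startswith(b'-')}
        positives = {g[1:] for g in guards if g.startswith(b'+')}
        if negatives & active:
            return False            # any matching negative guard blocks the push
        if positives:
            return bool(positives & active)
        return True

    assert not pushable([b'-stable'], {b'stable'})  # foo.patch in the example
    assert pushable([b'+stable'], {b'stable'})      # bar.patch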
|
3847 | 3846 | @command( |
|
3848 | 3847 | b"qfinish", |
|
3849 | 3848 | [(b'a', b'applied', None, _(b'finish all applied changesets'))], |
|
3850 | 3849 | _(b'hg qfinish [-a] [REV]...'), |
|
3851 | 3850 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3852 | 3851 | ) |
|
3853 | 3852 | def finish(ui, repo, *revrange, **opts): |
|
3854 | 3853 | """move applied patches into repository history |
|
3855 | 3854 | |
|
3856 | 3855 | Finishes the specified revisions (corresponding to applied |
|
3857 | 3856 | patches) by moving them out of mq control into regular repository |
|
3858 | 3857 | history. |
|
3859 | 3858 | |
|
3860 | 3859 | Accepts a revision range or the -a/--applied option. If --applied |
|
3861 | 3860 | is specified, all applied mq revisions are removed from mq |
|
3862 | 3861 | control. Otherwise, the given revisions must be at the base of the |
|
3863 | 3862 | stack of applied patches. |
|
3864 | 3863 | |
|
3865 | 3864 | This can be especially useful if your changes have been applied to |
|
3866 | 3865 | an upstream repository, or if you are about to push your changes |
|
3867 | 3866 | to upstream. |
|
3868 | 3867 | |
|
3869 | 3868 | Returns 0 on success. |
|
3870 | 3869 | """ |
|
3871 | 3870 | if not opts.get('applied') and not revrange: |
|
3872 | 3871 | raise error.Abort(_(b'no revisions specified')) |
|
3873 | 3872 | elif opts.get('applied'): |
|
3874 | 3873 | revrange = (b'qbase::qtip',) + revrange |
|
3875 | 3874 | |
|
3876 | 3875 | q = repo.mq |
|
3877 | 3876 | if not q.applied: |
|
3878 | 3877 | ui.status(_(b'no patches applied\n')) |
|
3879 | 3878 | return 0 |
|
3880 | 3879 | |
|
3881 | 3880 | revs = scmutil.revrange(repo, revrange) |
|
3882 | 3881 | if repo[b'.'].rev() in revs and repo[None].files(): |
|
3883 | 3882 | ui.warn(_(b'warning: uncommitted changes in the working directory\n')) |
|
3884 | 3883 | # queue.finish may change phases but leaves the responsibility of locking

3885 | 3884 | # the repo to the caller, to avoid deadlock with wlock. This command's code

3886 | 3885 | # is responsible for that locking.
|
3887 | 3886 | with repo.lock(): |
|
3888 | 3887 | q.finish(repo, revs) |
|
3889 | 3888 | q.savedirty() |
|
3890 | 3889 | return 0 |
|
3891 | 3890 | |
|
3892 | 3891 | |
|
3893 | 3892 | @command( |
|
3894 | 3893 | b"qqueue", |
|
3895 | 3894 | [ |
|
3896 | 3895 | (b'l', b'list', False, _(b'list all available queues')), |
|
3897 | 3896 | (b'', b'active', False, _(b'print name of active queue')), |
|
3898 | 3897 | (b'c', b'create', False, _(b'create new queue')), |
|
3899 | 3898 | (b'', b'rename', False, _(b'rename active queue')), |
|
3900 | 3899 | (b'', b'delete', False, _(b'delete reference to queue')), |
|
3901 | 3900 | (b'', b'purge', False, _(b'delete queue, and remove patch dir')), |
|
3902 | 3901 | ], |
|
3903 | 3902 | _(b'[OPTION] [QUEUE]'), |
|
3904 | 3903 | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, |
|
3905 | 3904 | ) |
|
3906 | 3905 | def qqueue(ui, repo, name=None, **opts): |
|
3907 | 3906 | """manage multiple patch queues |
|
3908 | 3907 | |
|
3909 | 3908 | Supports switching between different patch queues, as well as creating |
|
3910 | 3909 | new patch queues and deleting existing ones. |
|
3911 | 3910 | |
|
3912 | 3911 | Omitting a queue name or specifying -l/--list will show you the registered |
|
3913 | 3912 | queues - by default the "normal" patches queue is registered. The currently |
|
3914 | 3913 | active queue will be marked with "(active)". Specifying --active will print |
|
3915 | 3914 | only the name of the active queue. |
|
3916 | 3915 | |
|
3917 | 3916 | To create a new queue, use -c/--create. The queue is automatically made

3918 | 3917 | active, except when there are applied patches from the currently active

3919 | 3918 | queue in the repository; in that case the queue is only created, and

3920 | 3919 | switching to it fails.
|
3921 | 3920 | |
|
3922 | 3921 | To delete an existing queue, use --delete. You cannot delete the currently |
|
3923 | 3922 | active queue. |
|
3924 | 3923 | |
|
3925 | 3924 | Returns 0 on success. |
|
3926 | 3925 | """ |
|
3927 | 3926 | q = repo.mq |
|
3928 | 3927 | _defaultqueue = b'patches' |
|
3929 | 3928 | _allqueues = b'patches.queues' |
|
3930 | 3929 | _activequeue = b'patches.queue' |
|
3931 | 3930 | |
|
3932 | 3931 | def _getcurrent(): |
|
3933 | 3932 | cur = os.path.basename(q.path) |
|
3934 | 3933 | if cur.startswith(b'patches-'): |
|
3935 | 3934 | cur = cur[8:] |
|
3936 | 3935 | return cur |
|
3937 | 3936 | |
|
3938 | 3937 | def _noqueues(): |
|
3939 | 3938 | try: |
|
3940 | 3939 | fh = repo.vfs(_allqueues, b'r') |
|
3941 | 3940 | fh.close() |
|
3942 | 3941 | except IOError: |
|
3943 | 3942 | return True |
|
3944 | 3943 | |
|
3945 | 3944 | return False |
|
3946 | 3945 | |
|
3947 | 3946 | def _getqueues(): |
|
3948 | 3947 | current = _getcurrent() |
|
3949 | 3948 | |
|
3950 | 3949 | try: |
|
3951 | 3950 | fh = repo.vfs(_allqueues, b'r') |
|
3952 | 3951 | queues = [queue.strip() for queue in fh if queue.strip()] |
|
3953 | 3952 | fh.close() |
|
3954 | 3953 | if current not in queues: |
|
3955 | 3954 | queues.append(current) |
|
3956 | 3955 | except IOError: |
|
3957 | 3956 | queues = [_defaultqueue] |
|
3958 | 3957 | |
|
3959 | 3958 | return sorted(queues) |
|
3960 | 3959 | |
|
3961 | 3960 | def _setactive(name): |
|
3962 | 3961 | if q.applied: |
|
3963 | 3962 | raise error.Abort( |
|
3964 | 3963 | _( |
|
3965 | 3964 | b'new queue created, but cannot make active ' |
|
3966 | 3965 | b'as patches are applied' |
|
3967 | 3966 | ) |
|
3968 | 3967 | ) |
|
3969 | 3968 | _setactivenocheck(name) |
|
3970 | 3969 | |
|
3971 | 3970 | def _setactivenocheck(name): |
|
3972 | 3971 | fh = repo.vfs(_activequeue, b'w') |
|
3973 | 3972 | if name != b'patches': |
|
3974 | 3973 | fh.write(name) |
|
3975 | 3974 | fh.close() |
|
3976 | 3975 | |
|
3977 | 3976 | def _addqueue(name): |
|
3978 | 3977 | fh = repo.vfs(_allqueues, b'a') |
|
3979 | 3978 | fh.write(b'%s\n' % (name,)) |
|
3980 | 3979 | fh.close() |
|
3981 | 3980 | |
|
3982 | 3981 | def _queuedir(name): |
|
3983 | 3982 | if name == b'patches': |
|
3984 | 3983 | return repo.vfs.join(b'patches') |
|
3985 | 3984 | else: |
|
3986 | 3985 | return repo.vfs.join(b'patches-' + name) |
|
3987 | 3986 | |
|
3988 | 3987 | def _validname(name): |
|
3989 | 3988 | for n in name: |
|
3990 | 3989 | if n in b':\\/.': |
|
3991 | 3990 | return False |
|
3992 | 3991 | return True |
|
3993 | 3992 | |
|
3994 | 3993 | def _delete(name): |
|
3995 | 3994 | if name not in existing: |
|
3996 | 3995 | raise error.Abort(_(b'cannot delete queue that does not exist')) |
|
3997 | 3996 | |
|
3998 | 3997 | current = _getcurrent() |
|
3999 | 3998 | |
|
4000 | 3999 | if name == current: |
|
4001 | 4000 | raise error.Abort(_(b'cannot delete currently active queue')) |
|
4002 | 4001 | |
|
4003 | 4002 | fh = repo.vfs(b'patches.queues.new', b'w') |
|
4004 | 4003 | for queue in existing: |
|
4005 | 4004 | if queue == name: |
|
4006 | 4005 | continue |
|
4007 | 4006 | fh.write(b'%s\n' % (queue,)) |
|
4008 | 4007 | fh.close() |
|
4009 | 4008 | repo.vfs.rename(b'patches.queues.new', _allqueues) |
|
4010 | 4009 | |
|
4011 | 4010 | opts = pycompat.byteskwargs(opts) |
|
4012 | 4011 | if not name or opts.get(b'list') or opts.get(b'active'): |
|
4013 | 4012 | current = _getcurrent() |
|
4014 | 4013 | if opts.get(b'active'): |
|
4015 | 4014 | ui.write(b'%s\n' % (current,)) |
|
4016 | 4015 | return |
|
4017 | 4016 | for queue in _getqueues(): |
|
4018 | 4017 | ui.write(b'%s' % (queue,)) |
|
4019 | 4018 | if queue == current and not ui.quiet: |
|
4020 | 4019 | ui.write(_(b' (active)\n')) |
|
4021 | 4020 | else: |
|
4022 | 4021 | ui.write(b'\n') |
|
4023 | 4022 | return |
|
4024 | 4023 | |
|
4025 | 4024 | if not _validname(name): |
|
4026 | 4025 | raise error.Abort( |
|
4027 | 4026 | _(b'invalid queue name, may not contain the characters ":\\/."') |
|
4028 | 4027 | ) |
|
4029 | 4028 | |
|
4030 | 4029 | with repo.wlock(): |
|
4031 | 4030 | existing = _getqueues() |
|
4032 | 4031 | |
|
4033 | 4032 | if opts.get(b'create'): |
|
4034 | 4033 | if name in existing: |
|
4035 | 4034 | raise error.Abort(_(b'queue "%s" already exists') % name) |
|
4036 | 4035 | if _noqueues(): |
|
4037 | 4036 | _addqueue(_defaultqueue) |
|
4038 | 4037 | _addqueue(name) |
|
4039 | 4038 | _setactive(name) |
|
4040 | 4039 | elif opts.get(b'rename'): |
|
4041 | 4040 | current = _getcurrent() |
|
4042 | 4041 | if name == current: |
|
4043 | 4042 | raise error.Abort( |
|
4044 | 4043 | _(b'can\'t rename "%s" to its current name') % name |
|
4045 | 4044 | ) |
|
4046 | 4045 | if name in existing: |
|
4047 | 4046 | raise error.Abort(_(b'queue "%s" already exists') % name) |
|
4048 | 4047 | |
|
4049 | 4048 | olddir = _queuedir(current) |
|
4050 | 4049 | newdir = _queuedir(name) |
|
4051 | 4050 | |
|
4052 | 4051 | if os.path.exists(newdir): |
|
4053 | 4052 | raise error.Abort( |
|
4054 | 4053 | _(b'non-queue directory "%s" already exists') % newdir |
|
4055 | 4054 | ) |
|
4056 | 4055 | |
|
4057 | 4056 | fh = repo.vfs(b'patches.queues.new', b'w') |
|
4058 | 4057 | for queue in existing: |
|
4059 | 4058 | if queue == current: |
|
4060 | 4059 | fh.write(b'%s\n' % (name,)) |
|
4061 | 4060 | if os.path.exists(olddir): |
|
4062 | 4061 | util.rename(olddir, newdir) |
|
4063 | 4062 | else: |
|
4064 | 4063 | fh.write(b'%s\n' % (queue,)) |
|
4065 | 4064 | fh.close() |
|
4066 | 4065 | repo.vfs.rename(b'patches.queues.new', _allqueues) |
|
4067 | 4066 | _setactivenocheck(name) |
|
4068 | 4067 | elif opts.get(b'delete'): |
|
4069 | 4068 | _delete(name) |
|
4070 | 4069 | elif opts.get(b'purge'): |
|
4071 | 4070 | if name in existing: |
|
4072 | 4071 | _delete(name) |
|
4073 | 4072 | qdir = _queuedir(name) |
|
4074 | 4073 | if os.path.exists(qdir): |
|
4075 | 4074 | shutil.rmtree(qdir) |
|
4076 | 4075 | else: |
|
4077 | 4076 | if name not in existing: |
|
4078 | 4077 | raise error.Abort(_(b'use --create to create a new queue')) |
|
4079 | 4078 | _setactive(name) |
|
4080 | 4079 | |
|
4081 | 4080 | |
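The queue bookkeeping is plain text under ``.hg``: ``patches.queues`` holds one registered name per line, and ``patches.queue`` holds the active name (empty means the default ``patches`` queue). A minimal reader mirroring ``_getcurrent`` and ``_getqueues``, assuming direct filesystem access instead of the repo vfs::

    import os

    def read_queues(hgdir):
        def tryread(name):
            try:
                with open(os.path.join(hgdir, name), 'rb') as fh:
                    return fh.read()
            except IOError:
                return None

        active = (tryread('patches.queue') or b'').strip() or b'patches'
        data = tryread('patches.queues')
        if data is None:
            queues = [b'patches']
        else:
            names = {q.strip() for q in data.splitlines() if q.strip()}
            queues = sorted(names | {active})
        return active, queues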
|
4082 | 4081 | def mqphasedefaults(repo, roots): |
|
4083 | 4082 | """callback used to set mq changeset as secret when no phase data exists""" |
|
4084 | 4083 | if repo.mq.applied: |
|
4085 | 4084 | if repo.ui.configbool(b'mq', b'secret'): |
|
4086 | 4085 | mqphase = phases.secret |
|
4087 | 4086 | else: |
|
4088 | 4087 | mqphase = phases.draft |
|
4089 | 4088 | qbase = repo[repo.mq.applied[0].node] |
|
4090 | 4089 | roots[mqphase].add(qbase.node()) |
|
4091 | 4090 | return roots |
|
4092 | 4091 | |
|
4093 | 4092 | |
|
4094 | 4093 | def reposetup(ui, repo): |
|
4095 | 4094 | class mqrepo(repo.__class__): |
|
4096 | 4095 | @localrepo.unfilteredpropertycache |
|
4097 | 4096 | def mq(self): |
|
4098 | 4097 | return queue(self.ui, self.baseui, self.path) |
|
4099 | 4098 | |
|
4100 | 4099 | def invalidateall(self): |
|
4101 | 4100 | super(mqrepo, self).invalidateall() |
|
4102 | 4101 | if localrepo.hasunfilteredcache(self, 'mq'): |
|
4103 | 4102 | # recreate mq in case queue path was changed |
|
4104 | 4103 | delattr(self.unfiltered(), 'mq') |
|
4105 | 4104 | |
|
4106 | 4105 | def abortifwdirpatched(self, errmsg, force=False): |
|
4107 | 4106 | if self.mq.applied and self.mq.checkapplied and not force: |
|
4108 | 4107 | parents = self.dirstate.parents() |
|
4109 | 4108 | patches = [s.node for s in self.mq.applied] |
|
4110 | 4109 | if any(p in patches for p in parents): |
|
4111 | 4110 | raise error.Abort(errmsg) |
|
4112 | 4111 | |
|
4113 | 4112 | def commit( |
|
4114 | 4113 | self, |
|
4115 | 4114 | text=b"", |
|
4116 | 4115 | user=None, |
|
4117 | 4116 | date=None, |
|
4118 | 4117 | match=None, |
|
4119 | 4118 | force=False, |
|
4120 | 4119 | editor=False, |
|
4121 | 4120 | extra=None, |
|
4122 | 4121 | ): |
|
4123 | 4122 | if extra is None: |
|
4124 | 4123 | extra = {} |
|
4125 | 4124 | self.abortifwdirpatched( |
|
4126 | 4125 | _(b'cannot commit over an applied mq patch'), force |
|
4127 | 4126 | ) |
|
4128 | 4127 | |
|
4129 | 4128 | return super(mqrepo, self).commit( |
|
4130 | 4129 | text, user, date, match, force, editor, extra |
|
4131 | 4130 | ) |
|
4132 | 4131 | |
|
4133 | 4132 | def checkpush(self, pushop): |
|
4134 | 4133 | if self.mq.applied and self.mq.checkapplied and not pushop.force: |
|
4135 | 4134 | outapplied = [e.node for e in self.mq.applied] |
|
4136 | 4135 | if pushop.revs: |
|
4137 | 4136 | # Assume applied patches have no non-patch descendants and

4138 | 4137 | # are not on the remote already. Filter out any changeset

4139 | 4138 | # that is not being pushed.
|
4140 | 4139 | heads = set(pushop.revs) |
|
4141 | 4140 | for node in reversed(outapplied): |
|
4142 | 4141 | if node in heads: |
|
4143 | 4142 | break |
|
4144 | 4143 | else: |
|
4145 | 4144 | outapplied.pop() |
|
4146 | 4145 | # looking for pushed and shared changeset |
|
4147 | 4146 | for node in outapplied: |
|
4148 | 4147 | if self[node].phase() < phases.secret: |
|
4149 | 4148 | raise error.Abort(_(b'source has mq patches applied')) |
|
4150 | 4149 | # no non-secret patches pushed |
|
4151 | 4150 | super(mqrepo, self).checkpush(pushop) |
|
4152 | 4151 | |
|
4153 | 4152 | def _findtags(self): |
|
4154 | 4153 | '''augment tags from base class with patch tags''' |
|
4155 | 4154 | result = super(mqrepo, self)._findtags() |
|
4156 | 4155 | |
|
4157 | 4156 | q = self.mq |
|
4158 | 4157 | if not q.applied: |
|
4159 | 4158 | return result |
|
4160 | 4159 | |
|
4161 | 4160 | mqtags = [(patch.node, patch.name) for patch in q.applied] |
|
4162 | 4161 | |
|
4163 | 4162 | try: |
|
4164 | 4163 | # for now ignore filtering business |
|
4165 | 4164 | self.unfiltered().changelog.rev(mqtags[-1][0]) |
|
4166 | 4165 | except error.LookupError: |
|
4167 | 4166 | self.ui.warn( |
|
4168 | 4167 | _(b'mq status file refers to unknown node %s\n') |
|
4169 | 4168 | % short(mqtags[-1][0]) |
|
4170 | 4169 | ) |
|
4171 | 4170 | return result |
|
4172 | 4171 | |
|
4173 | 4172 | # do not add fake tags for filtered revisions |
|
4174 | 4173 | included = self.changelog.hasnode |
|
4175 | 4174 | mqtags = [mqt for mqt in mqtags if included(mqt[0])] |
|
4176 | 4175 | if not mqtags: |
|
4177 | 4176 | return result |
|
4178 | 4177 | |
|
4179 | 4178 | mqtags.append((mqtags[-1][0], b'qtip')) |
|
4180 | 4179 | mqtags.append((mqtags[0][0], b'qbase')) |
|
4181 | 4180 | mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent')) |
|
4182 | 4181 | tags = result[0] |
|
4183 | 4182 | for patch in mqtags: |
|
4184 | 4183 | if patch[1] in tags: |
|
4185 | 4184 | self.ui.warn( |
|
4186 | 4185 | _(b'tag %s overrides mq patch of the same name\n') |
|
4187 | 4186 | % patch[1] |
|
4188 | 4187 | ) |
|
4189 | 4188 | else: |
|
4190 | 4189 | tags[patch[1]] = patch[0] |
|
4191 | 4190 | |
|
4192 | 4191 | return result |
|
4193 | 4192 | |
|
4194 | 4193 | if repo.local(): |
|
4195 | 4194 | repo.__class__ = mqrepo |
|
4196 | 4195 | |
|
4197 | 4196 | repo._phasedefaults.append(mqphasedefaults) |
|
4198 | 4197 | |
|
4199 | 4198 | |
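``_findtags`` above layers three synthetic names over the per-patch tags: ``qtip`` (last applied), ``qbase`` (first applied) and ``qparent`` (the parent of ``qbase``). In miniature, with a hypothetical ``parent_of`` helper::

    def mq_synthetic_tags(applied, parent_of):
        # applied: [(node, name), ...] in stack order
        tags = {name: node for node, name in applied}
        if applied:
            tags[b'qtip'] = applied[-1][0]
            tags[b'qbase'] = applied[0][0]
            tags[b'qparent'] = parent_of(applied[0][0])
        return tags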
|
4200 | 4199 | def mqimport(orig, ui, repo, *args, **kwargs): |
|
4201 | 4200 | if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get( |
|
4202 | 4201 | 'no_commit', False |
|
4203 | 4202 | ): |
|
4204 | 4203 | repo.abortifwdirpatched( |
|
4205 | 4204 | _(b'cannot import over an applied patch'), kwargs.get('force') |
|
4206 | 4205 | ) |
|
4207 | 4206 | return orig(ui, repo, *args, **kwargs) |
|
4208 | 4207 | |
|
4209 | 4208 | |
|
4210 | 4209 | def mqinit(orig, ui, *args, **kwargs): |
|
4211 | 4210 | mq = kwargs.pop('mq', None) |
|
4212 | 4211 | |
|
4213 | 4212 | if not mq: |
|
4214 | 4213 | return orig(ui, *args, **kwargs) |
|
4215 | 4214 | |
|
4216 | 4215 | if args: |
|
4217 | 4216 | repopath = args[0] |
|
4218 | 4217 | if not hg.islocal(repopath): |
|
4219 | 4218 | raise error.Abort( |
|
4220 | 4219 | _(b'only a local queue repository may be initialized') |
|
4221 | 4220 | ) |
|
4222 | 4221 | else: |
|
4223 | 4222 | repopath = cmdutil.findrepo(encoding.getcwd()) |
|
4224 | 4223 | if not repopath: |
|
4225 | 4224 | raise error.Abort( |
|
4226 | 4225 | _(b'there is no Mercurial repository here (.hg not found)') |
|
4227 | 4226 | ) |
|
4228 | 4227 | repo = hg.repository(ui, repopath) |
|
4229 | 4228 | return qinit(ui, repo, True) |
|
4230 | 4229 | |
|
4231 | 4230 | |
|
4232 | 4231 | def mqcommand(orig, ui, repo, *args, **kwargs): |
|
4233 | 4232 | """Add --mq option to operate on patch repository instead of main""" |
|
4234 | 4233 | |
|
4235 | 4234 | # some commands do not like getting unknown options |
|
4236 | 4235 | mq = kwargs.pop('mq', None) |
|
4237 | 4236 | |
|
4238 | 4237 | if not mq: |
|
4239 | 4238 | return orig(ui, repo, *args, **kwargs) |
|
4240 | 4239 | |
|
4241 | 4240 | q = repo.mq |
|
4242 | 4241 | r = q.qrepo() |
|
4243 | 4242 | if not r: |
|
4244 | 4243 | raise error.Abort(_(b'no queue repository')) |
|
4245 | 4244 | return orig(r.ui, r, *args, **kwargs) |
|
4246 | 4245 | |
|
4247 | 4246 | |
|
4248 | 4247 | def summaryhook(ui, repo): |
|
4249 | 4248 | q = repo.mq |
|
4250 | 4249 | m = [] |
|
4251 | 4250 | a, u = len(q.applied), len(q.unapplied(repo)) |
|
4252 | 4251 | if a: |
|
4253 | 4252 | m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a) |
|
4254 | 4253 | if u: |
|
4255 | 4254 | m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u) |
|
4256 | 4255 | if m: |
|
4257 | 4256 | # i18n: column positioning for "hg summary" |
|
4258 | 4257 | ui.write(_(b"mq: %s\n") % b', '.join(m)) |
|
4259 | 4258 | else: |
|
4260 | 4259 | # i18n: column positioning for "hg summary" |
|
4261 | 4260 | ui.note(_(b"mq: (empty queue)\n")) |
|
4262 | 4261 | |
|
4263 | 4262 | |
|
4264 | 4263 | revsetpredicate = registrar.revsetpredicate() |
|
4265 | 4264 | |
|
4266 | 4265 | |
|
4267 | 4266 | @revsetpredicate(b'mq()') |
|
4268 | 4267 | def revsetmq(repo, subset, x): |
|
4269 | 4268 | """Changesets managed by MQ.""" |
|
4270 | 4269 | revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments")) |
|
4271 | 4270 | applied = {repo[r.node].rev() for r in repo.mq.applied} |
|
4272 | 4271 | return smartset.baseset([r for r in subset if r in applied]) |
|
4273 | 4272 | |
|
4274 | 4273 | |
|
4275 | 4274 | # tell hggettext to extract docstrings from these functions: |
|
4276 | 4275 | i18nfunctions = [revsetmq] |
|
4277 | 4276 | |
|
4278 | 4277 | |
|
4279 | 4278 | def extsetup(ui): |
|
4280 | 4279 | # Ensure mq wrappers are called first, regardless of extension load order,

4281 | 4280 | # by NOT wrapping in uisetup() and instead deferring to init stage two here.
|
4282 | 4281 | mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))] |
|
4283 | 4282 | |
|
4284 | 4283 | extensions.wrapcommand(commands.table, b'import', mqimport) |
|
4285 | 4284 | cmdutil.summaryhooks.add(b'mq', summaryhook) |
|
4286 | 4285 | |
|
4287 | 4286 | entry = extensions.wrapcommand(commands.table, b'init', mqinit) |
|
4288 | 4287 | entry[1].extend(mqopt) |
|
4289 | 4288 | |
|
4290 | 4289 | def dotable(cmdtable): |
|
4291 | 4290 | for cmd, entry in pycompat.iteritems(cmdtable): |
|
4292 | 4291 | cmd = cmdutil.parsealiases(cmd)[0] |
|
4293 | 4292 | func = entry[0] |
|
4294 | 4293 | if func.norepo: |
|
4295 | 4294 | continue |
|
4296 | 4295 | entry = extensions.wrapcommand(cmdtable, cmd, mqcommand) |
|
4297 | 4296 | entry[1].extend(mqopt) |
|
4298 | 4297 | |
|
4299 | 4298 | dotable(commands.table) |
|
4300 | 4299 | |
|
4301 | 4300 | thismodule = sys.modules["hgext.mq"] |
|
4302 | 4301 | for extname, extmodule in extensions.extensions(): |
|
4303 | 4302 | if extmodule != thismodule: |
|
4304 | 4303 | dotable(getattr(extmodule, 'cmdtable', {})) |
|
4305 | 4304 | |
|
4306 | 4305 | |
|
4307 | 4306 | colortable = { |
|
4308 | 4307 | b'qguard.negative': b'red', |
|
4309 | 4308 | b'qguard.positive': b'yellow', |
|
4310 | 4309 | b'qguard.unguarded': b'green', |
|
4311 | 4310 | b'qseries.applied': b'blue bold underline', |
|
4312 | 4311 | b'qseries.guarded': b'black bold', |
|
4313 | 4312 | b'qseries.missing': b'red bold', |
|
4314 | 4313 | b'qseries.unapplied': b'black bold', |
|
4315 | 4314 | } |
@@ -1,356 +1,355 b'' | |||
|
1 | 1 | # narrowbundle2.py - bundle2 extensions for narrow repository support |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2017 Google, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import struct |
|
12 | 12 | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | from mercurial.node import nullid | |
|
15 | 14 | from mercurial import ( |
|
16 | 15 | bundle2, |
|
17 | 16 | changegroup, |
|
18 | 17 | error, |
|
19 | 18 | exchange, |
|
20 | 19 | localrepo, |
|
21 | 20 | narrowspec, |
|
22 | 21 | repair, |
|
23 | 22 | requirements, |
|
24 | 23 | scmutil, |
|
25 | 24 | util, |
|
26 | 25 | wireprototypes, |
|
27 | 26 | ) |
|
28 | 27 | from mercurial.utils import stringutil |
|
29 | 28 | |
|
30 | 29 | _NARROWACL_SECTION = b'narrowacl' |
|
31 | 30 | _CHANGESPECPART = b'narrow:changespec' |
|
32 | 31 | _RESSPECS = b'narrow:responsespec' |
|
33 | 32 | _SPECPART = b'narrow:spec' |
|
34 | 33 | _SPECPART_INCLUDE = b'include' |
|
35 | 34 | _SPECPART_EXCLUDE = b'exclude' |
|
36 | 35 | _KILLNODESIGNAL = b'KILL' |
|
37 | 36 | _DONESIGNAL = b'DONE' |
|
38 | 37 | _ELIDEDCSHEADER = b'>20s20s20sl' # cset id, p1, p2, len(text) |
|
39 | 38 | _ELIDEDMFHEADER = b'>20s20s20s20sl' # manifest id, p1, p2, link id, len(text) |
|
40 | 39 | _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER) |
|
41 | 40 | _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER) |
|
42 | 41 | |
|
43 | 42 | # Serve a changegroup for a client with a narrow clone. |
|
44 | 43 | def getbundlechangegrouppart_narrow( |
|
45 | 44 | bundler, |
|
46 | 45 | repo, |
|
47 | 46 | source, |
|
48 | 47 | bundlecaps=None, |
|
49 | 48 | b2caps=None, |
|
50 | 49 | heads=None, |
|
51 | 50 | common=None, |
|
52 | 51 | **kwargs |
|
53 | 52 | ): |
|
54 | 53 | assert repo.ui.configbool(b'experimental', b'narrowservebrokenellipses') |
|
55 | 54 | |
|
56 | 55 | cgversions = b2caps.get(b'changegroup') |
|
57 | 56 | cgversions = [ |
|
58 | 57 | v |
|
59 | 58 | for v in cgversions |
|
60 | 59 | if v in changegroup.supportedoutgoingversions(repo) |
|
61 | 60 | ] |
|
62 | 61 | if not cgversions: |
|
63 | 62 | raise ValueError(_(b'no common changegroup version')) |
|
64 | 63 | version = max(cgversions) |
|
65 | 64 | |
|
66 | 65 | include = sorted(filter(bool, kwargs.get('includepats', []))) |
|
67 | 66 | exclude = sorted(filter(bool, kwargs.get('excludepats', []))) |
|
68 | 67 | generateellipsesbundle2( |
|
69 | 68 | bundler, |
|
70 | 69 | repo, |
|
71 | 70 | include, |
|
72 | 71 | exclude, |
|
73 | 72 | version, |
|
74 | 73 | common, |
|
75 | 74 | heads, |
|
76 | 75 | kwargs.get('depth', None), |
|
77 | 76 | ) |
|
78 | 77 | |
|
79 | 78 | |
|
80 | 79 | def generateellipsesbundle2( |
|
81 | 80 | bundler, |
|
82 | 81 | repo, |
|
83 | 82 | include, |
|
84 | 83 | exclude, |
|
85 | 84 | version, |
|
86 | 85 | common, |
|
87 | 86 | heads, |
|
88 | 87 | depth, |
|
89 | 88 | ): |
|
90 | 89 | match = narrowspec.match(repo.root, include=include, exclude=exclude) |
|
91 | 90 | if depth is not None: |
|
92 | 91 | depth = int(depth) |
|
93 | 92 | if depth < 1: |
|
94 | 93 | raise error.Abort(_(b'depth must be positive, got %d') % depth) |
|
95 | 94 | |
|
96 | 95 | heads = set(heads or repo.heads()) |
|
97 | common = set(common or [nullid]) | |
|
96 | common = set(common or [repo.nullid]) | |
|
98 | 97 | |
|
99 | 98 | visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis( |
|
100 | 99 | repo, common, heads, set(), match, depth=depth |
|
101 | 100 | ) |
|
102 | 101 | |
|
103 | 102 | repo.ui.debug(b'Found %d relevant revs\n' % len(relevant_nodes)) |
|
104 | 103 | if visitnodes: |
|
105 | 104 | packer = changegroup.getbundler( |
|
106 | 105 | version, |
|
107 | 106 | repo, |
|
108 | 107 | matcher=match, |
|
109 | 108 | ellipses=True, |
|
110 | 109 | shallow=depth is not None, |
|
111 | 110 | ellipsisroots=ellipsisroots, |
|
112 | 111 | fullnodes=relevant_nodes, |
|
113 | 112 | ) |
|
114 | 113 | cgdata = packer.generate(common, visitnodes, False, b'narrow_widen') |
|
115 | 114 | |
|
116 | 115 | part = bundler.newpart(b'changegroup', data=cgdata) |
|
117 | 116 | part.addparam(b'version', version) |
|
118 | 117 | if scmutil.istreemanifest(repo): |
|
119 | 118 | part.addparam(b'treemanifest', b'1') |
|
120 | 119 | |
|
121 | 120 | |
|
122 | 121 | def generate_ellipses_bundle2_for_widening( |
|
123 | 122 | bundler, |
|
124 | 123 | repo, |
|
125 | 124 | oldmatch, |
|
126 | 125 | newmatch, |
|
127 | 126 | version, |
|
128 | 127 | common, |
|
129 | 128 | known, |
|
130 | 129 | ): |
|
131 | common = set(common or [nullid]) | |
|
130 | common = set(common or [repo.nullid]) | |
|
132 | 131 | # Steps: |
|
133 | 132 | # 1. Send kill for "$known & ::common" |
|
134 | 133 | # |
|
135 | 134 | # 2. Send changegroup for ::common |
|
136 | 135 | # |
|
137 | 136 | # 3. Proceed. |
|
138 | 137 | # |
|
139 | 138 | # In the future, we can send kills for only the specific |
|
140 | 139 | # nodes we know should go away or change shape, and then |
|
141 | 140 | # send a data stream that tells the client something like this: |
|
142 | 141 | # |
|
143 | 142 | # a) apply this changegroup |
|
144 | 143 | # b) apply nodes XXX, YYY, ZZZ that you already have |
|
145 | 144 | # c) goto a |
|
146 | 145 | # |
|
147 | 146 | # until they've built up the full new state. |
|
148 | 147 | knownrevs = {repo.changelog.rev(n) for n in known} |
|
149 | 148 | # TODO: we could send only roots() of this set, and the |
|
150 | 149 | # list of nodes in common, and the client could work out |
|
151 | 150 | # what to strip, instead of us explicitly sending every |
|
152 | 151 | # single node. |
|
153 | 152 | deadrevs = knownrevs |
|
154 | 153 | |
|
155 | 154 | def genkills(): |
|
156 | 155 | for r in deadrevs: |
|
157 | 156 | yield _KILLNODESIGNAL |
|
158 | 157 | yield repo.changelog.node(r) |
|
159 | 158 | yield _DONESIGNAL |
|
160 | 159 | |
|
161 | 160 | bundler.newpart(_CHANGESPECPART, data=genkills()) |
|
162 | 161 | newvisit, newfull, newellipsis = exchange._computeellipsis( |
|
163 | 162 | repo, set(), common, knownrevs, newmatch |
|
164 | 163 | ) |
|
165 | 164 | if newvisit: |
|
166 | 165 | packer = changegroup.getbundler( |
|
167 | 166 | version, |
|
168 | 167 | repo, |
|
169 | 168 | matcher=newmatch, |
|
170 | 169 | ellipses=True, |
|
171 | 170 | shallow=False, |
|
172 | 171 | ellipsisroots=newellipsis, |
|
173 | 172 | fullnodes=newfull, |
|
174 | 173 | ) |
|
175 | 174 | cgdata = packer.generate(common, newvisit, False, b'narrow_widen') |
|
176 | 175 | |
|
177 | 176 | part = bundler.newpart(b'changegroup', data=cgdata) |
|
178 | 177 | part.addparam(b'version', version) |
|
179 | 178 | if scmutil.istreemanifest(repo): |
|
180 | 179 | part.addparam(b'treemanifest', b'1') |
|
181 | 180 | |
|
182 | 181 | |
|
183 | 182 | @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE)) |
|
184 | 183 | def _handlechangespec_2(op, inpart): |
|
185 | 184 | # XXX: This bundle2 handling is buggy and should be removed after hg5.2 is |
|
186 | 185 | # released. New servers will send a mandatory bundle2 part named |
|
187 | 186 | # 'Narrowspec' and will send specs as data instead of params. |
|
188 | 187 | # Refer to issue5952 and 6019 |
|
189 | 188 | includepats = set(inpart.params.get(_SPECPART_INCLUDE, b'').splitlines()) |
|
190 | 189 | excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, b'').splitlines()) |
|
191 | 190 | narrowspec.validatepatterns(includepats) |
|
192 | 191 | narrowspec.validatepatterns(excludepats) |
|
193 | 192 | |
|
194 | 193 | if not requirements.NARROW_REQUIREMENT in op.repo.requirements: |
|
195 | 194 | op.repo.requirements.add(requirements.NARROW_REQUIREMENT) |
|
196 | 195 | scmutil.writereporequirements(op.repo) |
|
197 | 196 | op.repo.setnarrowpats(includepats, excludepats) |
|
198 | 197 | narrowspec.copytoworkingcopy(op.repo) |
|
199 | 198 | |
|
200 | 199 | |
|
201 | 200 | @bundle2.parthandler(_RESSPECS) |
|
202 | 201 | def _handlenarrowspecs(op, inpart): |
|
203 | 202 | data = inpart.read() |
|
204 | 203 | inc, exc = data.split(b'\0') |
|
205 | 204 | includepats = set(inc.splitlines()) |
|
206 | 205 | excludepats = set(exc.splitlines()) |
|
207 | 206 | narrowspec.validatepatterns(includepats) |
|
208 | 207 | narrowspec.validatepatterns(excludepats) |
|
209 | 208 | |
|
210 | 209 | if requirements.NARROW_REQUIREMENT not in op.repo.requirements: |
|
211 | 210 | op.repo.requirements.add(requirements.NARROW_REQUIREMENT) |
|
212 | 211 | scmutil.writereporequirements(op.repo) |
|
213 | 212 | op.repo.setnarrowpats(includepats, excludepats) |
|
214 | 213 | narrowspec.copytoworkingcopy(op.repo) |
|
215 | 214 | |
|
216 | 215 | |
|
217 | 216 | @bundle2.parthandler(_CHANGESPECPART) |
|
218 | 217 | def _handlechangespec(op, inpart): |
|
219 | 218 | repo = op.repo |
|
220 | 219 | cl = repo.changelog |
|
221 | 220 | |
|
222 | 221 | # changesets which need to be stripped entirely. either they're no longer |
|
223 | 222 | # needed in the new narrow spec, or the server is sending a replacement |
|
224 | 223 | # in the changegroup part. |
|
225 | 224 | clkills = set() |
|
226 | 225 | |
|
227 | 226 | # A changespec part contains all the updates to ellipsis nodes |
|
228 | 227 | # that will happen as a result of widening or narrowing a |
|
229 | 228 | # repo. All the changes that this block encounters are ellipsis |
|
230 | 229 | # nodes or flags to kill an existing ellipsis. |
|
231 | 230 | chunksignal = changegroup.readexactly(inpart, 4) |
|
232 | 231 | while chunksignal != _DONESIGNAL: |
|
233 | 232 | if chunksignal == _KILLNODESIGNAL: |
|
234 | 233 | # a node used to be an ellipsis but isn't anymore |
|
235 | 234 | ck = changegroup.readexactly(inpart, 20) |
|
236 | 235 | if cl.hasnode(ck): |
|
237 | 236 | clkills.add(ck) |
|
238 | 237 | else: |
|
239 | 238 | raise error.Abort( |
|
240 | 239 | _(b'unexpected changespec node chunk type: %s') % chunksignal |
|
241 | 240 | ) |
|
242 | 241 | chunksignal = changegroup.readexactly(inpart, 4) |
|
243 | 242 | |
|
244 | 243 | if clkills: |
|
245 | 244 | # preserve bookmarks that repair.strip() would otherwise strip |
|
246 | 245 | op._bookmarksbackup = repo._bookmarks |
|
247 | 246 | |
|
248 | 247 | class dummybmstore(dict): |
|
249 | 248 | def applychanges(self, repo, tr, changes): |
|
250 | 249 | pass |
|
251 | 250 | |
|
252 | 251 | localrepo.localrepository._bookmarks.set(repo, dummybmstore()) |
|
253 | 252 | chgrpfile = repair.strip( |
|
254 | 253 | op.ui, repo, list(clkills), backup=True, topic=b'widen' |
|
255 | 254 | ) |
|
256 | 255 | if chgrpfile: |
|
257 | 256 | op._widen_uninterr = repo.ui.uninterruptible() |
|
258 | 257 | op._widen_uninterr.__enter__() |
|
259 | 258 | # presence of _widen_bundle attribute activates widen handler later |
|
260 | 259 | op._widen_bundle = chgrpfile |
|
261 | 260 | # Set the new narrowspec if we're widening. The setnewnarrowpats() method |
|
262 | 261 | # will currently always be there when using the core+narrowhg server, but |
|
263 | 262 | # other servers may include a changespec part even when not widening (e.g. |
|
264 | 263 | # because we're deepening a shallow repo). |
|
265 | 264 | if util.safehasattr(repo, 'setnewnarrowpats'): |
|
266 | 265 | repo.setnewnarrowpats() |
|
267 | 266 | |
|
268 | 267 | |
|
269 | 268 | def handlechangegroup_widen(op, inpart): |
|
270 | 269 | """Changegroup exchange handler which restores temporarily-stripped nodes""" |
|
271 | 270 | # We saved a bundle with stripped node data we must now restore. |
|
272 | 271 | # This approach is based on mercurial/repair.py@6ee26a53c111. |
|
273 | 272 | repo = op.repo |
|
274 | 273 | ui = op.ui |
|
275 | 274 | |
|
276 | 275 | chgrpfile = op._widen_bundle |
|
277 | 276 | del op._widen_bundle |
|
278 | 277 | vfs = repo.vfs |
|
279 | 278 | |
|
280 | 279 | ui.note(_(b"adding branch\n")) |
|
281 | 280 | f = vfs.open(chgrpfile, b"rb") |
|
282 | 281 | try: |
|
283 | 282 | gen = exchange.readbundle(ui, f, chgrpfile, vfs) |
|
284 | 283 | # silence internal shuffling chatter |
|
285 | 284 | override = {(b'ui', b'quiet'): True} |
|
286 | 285 | if ui.verbose: |
|
287 | 286 | override = {} |
|
288 | 287 | with ui.configoverride(override): |
|
289 | 288 | if isinstance(gen, bundle2.unbundle20): |
|
290 | 289 | with repo.transaction(b'strip') as tr: |
|
291 | 290 | bundle2.processbundle(repo, gen, lambda: tr) |
|
292 | 291 | else: |
|
293 | 292 | gen.apply( |
|
294 | 293 | repo, b'strip', b'bundle:' + vfs.join(chgrpfile), True |
|
295 | 294 | ) |
|
296 | 295 | finally: |
|
297 | 296 | f.close() |
|
298 | 297 | |
|
299 | 298 | # remove undo files |
|
300 | 299 | for undovfs, undofile in repo.undofiles(): |
|
301 | 300 | try: |
|
302 | 301 | undovfs.unlink(undofile) |
|
303 | 302 | except OSError as e: |
|
304 | 303 | if e.errno != errno.ENOENT: |
|
305 | 304 | ui.warn( |
|
306 | 305 | _(b'error removing %s: %s\n') |
|
307 | 306 | % (undovfs.join(undofile), stringutil.forcebytestr(e)) |
|
308 | 307 | ) |
|
309 | 308 | |
|
310 | 309 | # Remove partial backup only if there were no exceptions |
|
311 | 310 | op._widen_uninterr.__exit__(None, None, None) |
|
312 | 311 | vfs.unlink(chgrpfile) |
|
313 | 312 | |
|
314 | 313 | |
|
315 | 314 | def setup(): |
|
316 | 315 | """Enable narrow repo support in bundle2-related extension points.""" |
|
317 | 316 | getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS |
|
318 | 317 | |
|
319 | 318 | getbundleargs[b'narrow'] = b'boolean' |
|
320 | 319 | getbundleargs[b'depth'] = b'plain' |
|
321 | 320 | getbundleargs[b'oldincludepats'] = b'csv' |
|
322 | 321 | getbundleargs[b'oldexcludepats'] = b'csv' |
|
323 | 322 | getbundleargs[b'known'] = b'csv' |
|
324 | 323 | |
|
325 | 324 | # Extend changegroup serving to handle requests from narrow clients. |
|
326 | 325 | origcgfn = exchange.getbundle2partsmapping[b'changegroup'] |
|
327 | 326 | |
|
328 | 327 | def wrappedcgfn(*args, **kwargs): |
|
329 | 328 | repo = args[1] |
|
330 | 329 | if repo.ui.has_section(_NARROWACL_SECTION): |
|
331 | 330 | kwargs = exchange.applynarrowacl(repo, kwargs) |
|
332 | 331 | |
|
333 | 332 | if kwargs.get('narrow', False) and repo.ui.configbool( |
|
334 | 333 | b'experimental', b'narrowservebrokenellipses' |
|
335 | 334 | ): |
|
336 | 335 | getbundlechangegrouppart_narrow(*args, **kwargs) |
|
337 | 336 | else: |
|
338 | 337 | origcgfn(*args, **kwargs) |
|
339 | 338 | |
|
340 | 339 | exchange.getbundle2partsmapping[b'changegroup'] = wrappedcgfn |
|
341 | 340 | |
|
342 | 341 | # Extend changegroup receiver so client can fixup after widen requests. |
|
343 | 342 | origcghandler = bundle2.parthandlermapping[b'changegroup'] |
|
344 | 343 | |
|
345 | 344 | def wrappedcghandler(op, inpart): |
|
346 | 345 | origcghandler(op, inpart) |
|
347 | 346 | if util.safehasattr(op, '_widen_bundle'): |
|
348 | 347 | handlechangegroup_widen(op, inpart) |
|
349 | 348 | if util.safehasattr(op, '_bookmarksbackup'): |
|
350 | 349 | localrepo.localrepository._bookmarks.set( |
|
351 | 350 | op.repo, op._bookmarksbackup |
|
352 | 351 | ) |
|
353 | 352 | del op._bookmarksbackup |
|
354 | 353 | |
|
355 | 354 | wrappedcghandler.params = origcghandler.params |
|
356 | 355 | bundle2.parthandlermapping[b'changegroup'] = wrappedcghandler |
@@ -1,680 +1,679 b'' | |||
|
1 | 1 | # narrowcommands.py - command modifications for narrowhg extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2017 Google, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import itertools |
|
10 | 10 | import os |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | from mercurial.node import ( |
|
14 | 14 | hex, |
|
15 | nullid, | |
|
16 | 15 | short, |
|
17 | 16 | ) |
|
18 | 17 | from mercurial import ( |
|
19 | 18 | bundle2, |
|
20 | 19 | cmdutil, |
|
21 | 20 | commands, |
|
22 | 21 | discovery, |
|
23 | 22 | encoding, |
|
24 | 23 | error, |
|
25 | 24 | exchange, |
|
26 | 25 | extensions, |
|
27 | 26 | hg, |
|
28 | 27 | narrowspec, |
|
29 | 28 | pathutil, |
|
30 | 29 | pycompat, |
|
31 | 30 | registrar, |
|
32 | 31 | repair, |
|
33 | 32 | repoview, |
|
34 | 33 | requirements, |
|
35 | 34 | sparse, |
|
36 | 35 | util, |
|
37 | 36 | wireprototypes, |
|
38 | 37 | ) |
|
39 | 38 | from mercurial.utils import ( |
|
40 | 39 | urlutil, |
|
41 | 40 | ) |
|
42 | 41 | |
|
43 | 42 | table = {} |
|
44 | 43 | command = registrar.command(table) |
|
45 | 44 | |
|
46 | 45 | |
|
47 | 46 | def setup(): |
|
48 | 47 | """Wraps user-facing mercurial commands with narrow-aware versions.""" |
|
49 | 48 | |
|
50 | 49 | entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd) |
|
51 | 50 | entry[1].append( |
|
52 | 51 | (b'', b'narrow', None, _(b"create a narrow clone of select files")) |
|
53 | 52 | ) |
|
54 | 53 | entry[1].append( |
|
55 | 54 | ( |
|
56 | 55 | b'', |
|
57 | 56 | b'depth', |
|
58 | 57 | b'', |
|
59 | 58 | _(b"limit the history fetched by distance from heads"), |
|
60 | 59 | ) |
|
61 | 60 | ) |
|
62 | 61 | entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file"))) |
|
63 | 62 | # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit |
|
64 | 63 | if b'sparse' not in extensions.enabled(): |
|
65 | 64 | entry[1].append( |
|
66 | 65 | (b'', b'include', [], _(b"specifically fetch this file/directory")) |
|
67 | 66 | ) |
|
68 | 67 | entry[1].append( |
|
69 | 68 | ( |
|
70 | 69 | b'', |
|
71 | 70 | b'exclude', |
|
72 | 71 | [], |
|
73 | 72 | _(b"do not fetch this file/directory, even if included"), |
|
74 | 73 | ) |
|
75 | 74 | ) |
|
76 | 75 | |
|
77 | 76 | entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd) |
|
78 | 77 | entry[1].append( |
|
79 | 78 | ( |
|
80 | 79 | b'', |
|
81 | 80 | b'depth', |
|
82 | 81 | b'', |
|
83 | 82 | _(b"limit the history fetched by distance from heads"), |
|
84 | 83 | ) |
|
85 | 84 | ) |
|
86 | 85 | |
|
87 | 86 | extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd) |
|
88 | 87 | |
|
89 | 88 | |
|
90 | 89 | def clonenarrowcmd(orig, ui, repo, *args, **opts): |
|
91 | 90 | """Wraps clone command, so 'hg clone' first wraps localrepo.clone().""" |
|
92 | 91 | opts = pycompat.byteskwargs(opts) |
|
93 | 92 | wrappedextraprepare = util.nullcontextmanager() |
|
94 | 93 | narrowspecfile = opts[b'narrowspec'] |
|
95 | 94 | |
|
96 | 95 | if narrowspecfile: |
|
97 | 96 | filepath = os.path.join(encoding.getcwd(), narrowspecfile) |
|
98 | 97 | ui.status(_(b"reading narrowspec from '%s'\n") % filepath) |
|
99 | 98 | try: |
|
100 | 99 | fdata = util.readfile(filepath) |
|
101 | 100 | except IOError as inst: |
|
102 | 101 | raise error.Abort( |
|
103 | 102 | _(b"cannot read narrowspecs from '%s': %s") |
|
104 | 103 | % (filepath, encoding.strtolocal(inst.strerror)) |
|
105 | 104 | ) |
|
106 | 105 | |
|
107 | 106 | includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow') |
|
108 | 107 | if profiles: |
|
109 | 108 | raise error.ConfigError( |
|
110 | 109 | _( |
|
111 | 110 | b"cannot specify other files using '%include' in" |
|
112 | 111 | b" narrowspec" |
|
113 | 112 | ) |
|
114 | 113 | ) |
|
115 | 114 | |
|
116 | 115 | narrowspec.validatepatterns(includes) |
|
117 | 116 | narrowspec.validatepatterns(excludes) |
|
118 | 117 | |
|
119 | 118 | # a narrowspec was passed, so assume the user wants a narrow clone
|
120 | 119 | opts[b'narrow'] = True |
|
121 | 120 | opts[b'include'].extend(includes) |
|
122 | 121 | opts[b'exclude'].extend(excludes) |
|
123 | 122 | |
|
124 | 123 | if opts[b'narrow']: |
|
125 | 124 | |
|
126 | 125 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
127 | 126 | orig(pullop, kwargs) |
|
128 | 127 | |
|
129 | 128 | if opts.get(b'depth'): |
|
130 | 129 | kwargs[b'depth'] = opts[b'depth'] |
|
131 | 130 | |
|
132 | 131 | wrappedextraprepare = extensions.wrappedfunction( |
|
133 | 132 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen |
|
134 | 133 | ) |
|
135 | 134 | |
|
136 | 135 | with wrappedextraprepare: |
|
137 | 136 | return orig(ui, repo, *args, **pycompat.strkwargs(opts)) |
|
138 | 137 | |
|
139 | 138 | |
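Since ``sparse.parseconfig`` does the parsing, narrowspec files share the sparse profile syntax; a plausible file for ``--narrowspec`` might look like this (illustrative paths)::

    [include]
    path:src
    [exclude]
    path:src/tests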
|
140 | 139 | def pullnarrowcmd(orig, ui, repo, *args, **opts): |
|
141 | 140 | """Wraps pull command to allow modifying narrow spec.""" |
|
142 | 141 | wrappedextraprepare = util.nullcontextmanager() |
|
143 | 142 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
144 | 143 | |
|
145 | 144 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
146 | 145 | orig(pullop, kwargs) |
|
147 | 146 | if opts.get('depth'): |
|
148 | 147 | kwargs[b'depth'] = opts['depth'] |
|
149 | 148 | |
|
150 | 149 | wrappedextraprepare = extensions.wrappedfunction( |
|
151 | 150 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen |
|
152 | 151 | ) |
|
153 | 152 | |
|
154 | 153 | with wrappedextraprepare: |
|
155 | 154 | return orig(ui, repo, *args, **opts) |
|
156 | 155 | |
|
157 | 156 | |
|
158 | 157 | def archivenarrowcmd(orig, ui, repo, *args, **opts): |
|
159 | 158 | """Wraps archive command to narrow the default includes.""" |
|
160 | 159 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
161 | 160 | repo_includes, repo_excludes = repo.narrowpats |
|
162 | 161 | includes = set(opts.get('include', [])) |
|
163 | 162 | excludes = set(opts.get('exclude', [])) |
|
164 | 163 | includes, excludes, unused_invalid = narrowspec.restrictpatterns( |
|
165 | 164 | includes, excludes, repo_includes, repo_excludes |
|
166 | 165 | ) |
|
167 | 166 | if includes: |
|
168 | 167 | opts['include'] = includes |
|
169 | 168 | if excludes: |
|
170 | 169 | opts['exclude'] = excludes |
|
171 | 170 | return orig(ui, repo, *args, **opts) |
|
172 | 171 | |
|
173 | 172 | |
|
174 | 173 | def pullbundle2extraprepare(orig, pullop, kwargs): |
|
175 | 174 | repo = pullop.repo |
|
176 | 175 | if requirements.NARROW_REQUIREMENT not in repo.requirements: |
|
177 | 176 | return orig(pullop, kwargs) |
|
178 | 177 | |
|
179 | 178 | if wireprototypes.NARROWCAP not in pullop.remote.capabilities(): |
|
180 | 179 | raise error.Abort(_(b"server does not support narrow clones")) |
|
181 | 180 | orig(pullop, kwargs) |
|
182 | 181 | kwargs[b'narrow'] = True |
|
183 | 182 | include, exclude = repo.narrowpats |
|
184 | 183 | kwargs[b'oldincludepats'] = include |
|
185 | 184 | kwargs[b'oldexcludepats'] = exclude |
|
186 | 185 | if include: |
|
187 | 186 | kwargs[b'includepats'] = include |
|
188 | 187 | if exclude: |
|
189 | 188 | kwargs[b'excludepats'] = exclude |
|
190 | 189 | # calculate known nodes only in ellipses cases because in non-ellipses cases |
|
191 | 190 | # we have all the nodes |
|
192 | 191 | if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities(): |
|
193 | 192 | kwargs[b'known'] = [ |
|
194 | 193 | hex(ctx.node()) |
|
195 | 194 | for ctx in repo.set(b'::%ln', pullop.common) |
|
196 | if ctx.node() != nullid | |
|
195 | if ctx.node() != repo.nullid | |
|
197 | 196 | ] |
|
198 | 197 | if not kwargs[b'known']: |
|
199 | 198 | # Mercurial serializes an empty list as '' and deserializes it as |
|
200 | 199 | # [''], so delete it instead to avoid handling the empty string on |
|
201 | 200 | # the server. |
|
202 | 201 | del kwargs[b'known'] |
|
203 | 202 | |
|
204 | 203 | |
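The final ``del`` works around the wire-encoding quirk called out in the comment: an empty list does not survive the csv argument encoding. Schematically (a sketch of the asymmetry, not the actual wireproto helpers)::

    def encodecsv(values):
        return b','.join(values)     # [] -> b''

    def decodecsv(data):
        return data.split(b',')      # b'' -> [b''], not []

    assert decodecsv(encodecsv([])) == [b'']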
|
205 | 204 | extensions.wrapfunction( |
|
206 | 205 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare |
|
207 | 206 | ) |
|
208 | 207 | |
|
209 | 208 | |
|
210 | 209 | def _narrow( |
|
211 | 210 | ui, |
|
212 | 211 | repo, |
|
213 | 212 | remote, |
|
214 | 213 | commoninc, |
|
215 | 214 | oldincludes, |
|
216 | 215 | oldexcludes, |
|
217 | 216 | newincludes, |
|
218 | 217 | newexcludes, |
|
219 | 218 | force, |
|
220 | 219 | backup, |
|
221 | 220 | ): |
|
222 | 221 | oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes) |
|
223 | 222 | newmatch = narrowspec.match(repo.root, newincludes, newexcludes) |
|
224 | 223 | |
|
225 | 224 | # This is essentially doing "hg outgoing" to find all local-only |
|
226 | 225 | # commits. We will then check that the local-only commits don't |
|
227 | 226 | # have any changes to files that will be untracked. |
|
228 | 227 | unfi = repo.unfiltered() |
|
229 | 228 | outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc) |
|
230 | 229 | ui.status(_(b'looking for local changes to affected paths\n')) |
|
231 | 230 | localnodes = [] |
|
232 | 231 | for n in itertools.chain(outgoing.missing, outgoing.excluded): |
|
233 | 232 | if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()): |
|
234 | 233 | localnodes.append(n) |
|
235 | 234 | revstostrip = unfi.revs(b'descendants(%ln)', localnodes) |
|
236 | 235 | hiddenrevs = repoview.filterrevs(repo, b'visible') |
|
237 | 236 | visibletostrip = list( |
|
238 | 237 | repo.changelog.node(r) for r in (revstostrip - hiddenrevs) |
|
239 | 238 | ) |
|
240 | 239 | if visibletostrip: |
|
241 | 240 | ui.status( |
|
242 | 241 | _( |
|
243 | 242 | b'The following changeset(s) or their ancestors have ' |
|
244 | 243 | b'local changes not on the remote:\n' |
|
245 | 244 | ) |
|
246 | 245 | ) |
|
247 | 246 | maxnodes = 10 |
|
248 | 247 | if ui.verbose or len(visibletostrip) <= maxnodes: |
|
249 | 248 | for n in visibletostrip: |
|
250 | 249 | ui.status(b'%s\n' % short(n)) |
|
251 | 250 | else: |
|
252 | 251 | for n in visibletostrip[:maxnodes]: |
|
253 | 252 | ui.status(b'%s\n' % short(n)) |
|
254 | 253 | ui.status( |
|
255 | 254 | _(b'...and %d more, use --verbose to list all\n') |
|
256 | 255 | % (len(visibletostrip) - maxnodes) |
|
257 | 256 | ) |
|
258 | 257 | if not force: |
|
259 | 258 | raise error.StateError( |
|
260 | 259 | _(b'local changes found'), |
|
261 | 260 | hint=_(b'use --force-delete-local-changes to ignore'), |
|
262 | 261 | ) |
|
263 | 262 | |
|
264 | 263 | with ui.uninterruptible(): |
|
265 | 264 | if revstostrip: |
|
266 | 265 | tostrip = [unfi.changelog.node(r) for r in revstostrip] |
|
267 | 266 | if repo[b'.'].node() in tostrip: |
|
268 | 267 | # stripping working copy, so move to a different commit first |
|
269 | 268 | urev = max( |
|
270 | 269 | repo.revs( |
|
271 | 270 | b'(::%n) - %ln + null', |
|
272 | 271 | repo[b'.'].node(), |
|
273 | 272 | visibletostrip, |
|
274 | 273 | ) |
|
275 | 274 | ) |
|
276 | 275 | hg.clean(repo, urev) |
|
277 | 276 | overrides = {(b'devel', b'strip-obsmarkers'): False} |
|
278 | 277 | with ui.configoverride(overrides, b'narrow'): |
|
279 | 278 | repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup) |
|
280 | 279 | |
|
281 | 280 | todelete = [] |
|
282 | 281 | for t, f, f2, size in repo.store.datafiles(): |
|
283 | 282 | if f.startswith(b'data/'): |
|
284 | 283 | file = f[5:-2] |
|
285 | 284 | if not newmatch(file): |
|
286 | 285 | todelete.append(f) |
|
287 | 286 | elif f.startswith(b'meta/'): |
|
288 | 287 | dir = f[5:-13] |
|
289 | 288 | dirs = sorted(pathutil.dirs({dir})) + [dir] |
|
290 | 289 | include = True |
|
291 | 290 | for d in dirs: |
|
292 | 291 | visit = newmatch.visitdir(d) |
|
293 | 292 | if not visit: |
|
294 | 293 | include = False |
|
295 | 294 | break |
|
296 | 295 | if visit == b'all': |
|
297 | 296 | break |
|
298 | 297 | if not include: |
|
299 | 298 | todelete.append(f) |
|
300 | 299 | |
|
301 | 300 | repo.destroying() |
|
302 | 301 | |
|
303 | 302 | with repo.transaction(b'narrowing'): |
|
304 | 303 | # Update narrowspec before removing revlogs, so repo won't be |
|
305 | 304 | # corrupt in case of crash |
|
306 | 305 | repo.setnarrowpats(newincludes, newexcludes) |
|
307 | 306 | |
|
308 | 307 | for f in todelete: |
|
309 | 308 | ui.status(_(b'deleting %s\n') % f) |
|
310 | 309 | util.unlinkpath(repo.svfs.join(f)) |
|
311 | 310 | repo.store.markremoved(f) |
|
312 | 311 | |
|
313 | 312 | narrowspec.updateworkingcopy(repo, assumeclean=True) |
|
314 | 313 | narrowspec.copytoworkingcopy(repo) |
|
315 | 314 | |
|
316 | 315 | repo.destroyed() |
|
317 | 316 | |
|
318 | 317 | |
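In the ``meta/`` branch of ``_narrow`` above, a tree-manifest revlog for a directory is kept only if ``visitdir()`` approves the directory and every ancestor: a falsy answer anywhere prunes it, while ``b'all'`` short-circuits to keep. The same walk in isolation (toy ``visitdir``, identical control flow)::

    def keepdirlog(visitdir, dirs):
        # dirs: ancestors of the directory, outermost first, then the dir itself
        for d in dirs:
            v = visitdir(d)
            if not v:
                return False        # pruned somewhere on the path
            if v == b'all':
                return True         # everything below is wanted
        return True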
|
319 | 318 | def _widen( |
|
320 | 319 | ui, |
|
321 | 320 | repo, |
|
322 | 321 | remote, |
|
323 | 322 | commoninc, |
|
324 | 323 | oldincludes, |
|
325 | 324 | oldexcludes, |
|
326 | 325 | newincludes, |
|
327 | 326 | newexcludes, |
|
328 | 327 | ): |
|
329 | 328 | # For now we assume that if a server has ellipses enabled, we will be

330 | 329 | # exchanging ellipsis nodes. In the future we should (maybe) add ellipses

331 | 330 | # as a client-side requirement, to distinguish whether a client is shallow,

332 | 331 | # and then tell the server whether we want ellipses or not.

333 | 332 | # Theoretically, a non-ellipses repo should be able to use narrow

334 | 333 | # functionality from an ellipses-enabled server
|
335 | 334 | remotecap = remote.capabilities() |
|
336 | 335 | ellipsesremote = any( |
|
337 | 336 | cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP |
|
338 | 337 | ) |
|
339 | 338 | |
|
340 | 339 | # check whether we are talking to a server which supports old version of |
|
341 | 340 | # ellipses capabilities |
|
342 | 341 | isoldellipses = ( |
|
343 | 342 | ellipsesremote |
|
344 | 343 | and wireprototypes.ELLIPSESCAP1 in remotecap |
|
345 | 344 | and wireprototypes.ELLIPSESCAP not in remotecap |
|
346 | 345 | ) |
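
The pair of checks above effectively classifies the remote into three cases. A hypothetical helper spelling out that decision table (the mode names are invented; the capability constants are the ones referenced in the diff):

    from mercurial import wireprototypes

    def ellipses_mode(remotecap):
        if wireprototypes.ELLIPSESCAP in remotecap:
            return 'new'    # modern ellipses exchange
        if wireprototypes.ELLIPSESCAP1 in remotecap:
            return 'old'    # legacy path, i.e. isoldellipses above
        return 'none'       # plain narrow widening, no ellipses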
|
347 | 346 | |
|
348 | 347 | def pullbundle2extraprepare_widen(orig, pullop, kwargs): |
|
349 | 348 | orig(pullop, kwargs) |
|
350 | 349 | # The old{in,ex}cludepats have already been set by orig() |
|
351 | 350 | kwargs[b'includepats'] = newincludes |
|
352 | 351 | kwargs[b'excludepats'] = newexcludes |
|
353 | 352 | |
|
354 | 353 | wrappedextraprepare = extensions.wrappedfunction( |
|
355 | 354 | exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen |
|
356 | 355 | ) |
|
357 | 356 | |
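
For readers unfamiliar with the idiom: extensions.wrappedfunction returns a context manager that temporarily replaces the named attribute and hands the original through as the wrapper's first argument. A simplified, standard-library-only model of what it does (the real helper also accepts bytes attribute names and does more bookkeeping):

    import contextlib

    @contextlib.contextmanager
    def wrappedfunction(container, name, wrapper):
        origfn = getattr(container, name)
        def wrapped(*args, **kwargs):
            # the wrapper decides if and when to call the original
            return wrapper(origfn, *args, **kwargs)
        setattr(container, name, wrapped)
        try:
            yield
        finally:
            setattr(container, name, origfn)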
|
358 | 357 | # define a function that narrowbundle2 can call after creating the |
|
359 | 358 | # backup bundle, but before applying the bundle from the server |
|
360 | 359 | def setnewnarrowpats(): |
|
361 | 360 | repo.setnarrowpats(newincludes, newexcludes) |
|
362 | 361 | |
|
363 | 362 | repo.setnewnarrowpats = setnewnarrowpats |
|
364 | 363 | # silence the devel-warning of applying an empty changegroup |
|
365 | 364 | overrides = {(b'devel', b'all-warnings'): False} |
|
366 | 365 | |
|
367 | 366 | common = commoninc[0] |
|
368 | 367 | with ui.uninterruptible(): |
|
369 | 368 | if ellipsesremote: |
|
370 | 369 | ds = repo.dirstate |
|
371 | 370 | p1, p2 = ds.p1(), ds.p2() |
|
372 | 371 | with ds.parentchange(): |
|
373 | | ds.setparents(nullid, nullid)
|
| 372 | ds.setparents(repo.nullid, repo.nullid)
|
374 | 373 | if isoldellipses: |
|
375 | 374 | with wrappedextraprepare: |
|
376 | 375 | exchange.pull(repo, remote, heads=common) |
|
377 | 376 | else: |
|
378 | 377 | known = [] |
|
379 | 378 | if ellipsesremote: |
|
380 | 379 | known = [ |
|
381 | 380 | ctx.node() |
|
382 | 381 | for ctx in repo.set(b'::%ln', common) |
|
383 | | if ctx.node() != nullid
|
| 382 | if ctx.node() != repo.nullid
|
384 | 383 | ] |
|
385 | 384 | with remote.commandexecutor() as e: |
|
386 | 385 | bundle = e.callcommand( |
|
387 | 386 | b'narrow_widen', |
|
388 | 387 | { |
|
389 | 388 | b'oldincludes': oldincludes, |
|
390 | 389 | b'oldexcludes': oldexcludes, |
|
391 | 390 | b'newincludes': newincludes, |
|
392 | 391 | b'newexcludes': newexcludes, |
|
393 | 392 | b'cgversion': b'03', |
|
394 | 393 | b'commonheads': common, |
|
395 | 394 | b'known': known, |
|
396 | 395 | b'ellipses': ellipsesremote, |
|
397 | 396 | }, |
|
398 | 397 | ).result() |
|
399 | 398 | |
|
400 | 399 | trmanager = exchange.transactionmanager( |
|
401 | 400 | repo, b'widen', remote.url() |
|
402 | 401 | ) |
|
403 | 402 | with trmanager, repo.ui.configoverride(overrides, b'widen'): |
|
404 | 403 | op = bundle2.bundleoperation( |
|
405 | 404 | repo, trmanager.transaction, source=b'widen' |
|
406 | 405 | ) |
|
407 | 406 | # TODO: we should catch error.Abort here |
|
408 | 407 | bundle2.processbundle(repo, bundle, op=op) |
|
409 | 408 | |
|
410 | 409 | if ellipsesremote: |
|
411 | 410 | with ds.parentchange(): |
|
412 | 411 | ds.setparents(p1, p2) |
|
413 | 412 | |
|
414 | 413 | with repo.transaction(b'widening'): |
|
415 | 414 | repo.setnewnarrowpats() |
|
416 | 415 | narrowspec.updateworkingcopy(repo) |
|
417 | 416 | narrowspec.copytoworkingcopy(repo) |
|
418 | 417 | |
|
419 | 418 | |
|
420 | 419 | # TODO(rdamazio): Make new matcher format and update description |
|
421 | 420 | @command( |
|
422 | 421 | b'tracked', |
|
423 | 422 | [ |
|
424 | 423 | (b'', b'addinclude', [], _(b'new paths to include')), |
|
425 | 424 | (b'', b'removeinclude', [], _(b'old paths to no longer include')), |
|
426 | 425 | ( |
|
427 | 426 | b'', |
|
428 | 427 | b'auto-remove-includes', |
|
429 | 428 | False, |
|
430 | 429 | _(b'automatically choose unused includes to remove'), |
|
431 | 430 | ), |
|
432 | 431 | (b'', b'addexclude', [], _(b'new paths to exclude')), |
|
433 | 432 | (b'', b'import-rules', b'', _(b'import narrowspecs from a file')), |
|
434 | 433 | (b'', b'removeexclude', [], _(b'old paths to no longer exclude')), |
|
435 | 434 | ( |
|
436 | 435 | b'', |
|
437 | 436 | b'clear', |
|
438 | 437 | False, |
|
439 | 438 | _(b'whether to replace the existing narrowspec'), |
|
440 | 439 | ), |
|
441 | 440 | ( |
|
442 | 441 | b'', |
|
443 | 442 | b'force-delete-local-changes', |
|
444 | 443 | False, |
|
445 | 444 | _(b'forces deletion of local changes when narrowing'), |
|
446 | 445 | ), |
|
447 | 446 | ( |
|
448 | 447 | b'', |
|
449 | 448 | b'backup', |
|
450 | 449 | True, |
|
451 | 450 | _(b'back up local changes when narrowing'), |
|
452 | 451 | ), |
|
453 | 452 | ( |
|
454 | 453 | b'', |
|
455 | 454 | b'update-working-copy', |
|
456 | 455 | False, |
|
457 | 456 | _(b'update working copy when the store has changed'), |
|
458 | 457 | ), |
|
459 | 458 | ] |
|
460 | 459 | + commands.remoteopts, |
|
461 | 460 | _(b'[OPTIONS]... [REMOTE]'), |
|
462 | 461 | inferrepo=True, |
|
463 | 462 | helpcategory=command.CATEGORY_MAINTENANCE, |
|
464 | 463 | ) |
|
465 | 464 | def trackedcmd(ui, repo, remotepath=None, *pats, **opts): |
|
466 | 465 | """show or change the current narrowspec |
|
467 | 466 | |
|
468 | 467 | With no argument, shows the current narrowspec entries, one per line. Each |
|
469 | 468 | line will be prefixed with 'I' or 'X' for included or excluded patterns, |
|
470 | 469 | respectively. |
|
471 | 470 | |
|
472 | 471 | The narrowspec consists of expressions to match remote files and/or
|
473 | 472 | directories that should be pulled into your client. |
|
474 | 473 | The narrowspec has *include* and *exclude* expressions, with excludes always |
|
475 | 474 | trumping includes: that is, if a file matches an exclude expression, it will |
|
476 | 475 | be excluded even if it also matches an include expression. |
|
477 | 476 | Excluding files that were never included has no effect. |
|
478 | 477 | |
|
479 | 478 | Each included or excluded entry is in the format described by |
|
480 | 479 | 'hg help patterns'. |
|
481 | 480 | |
|
482 | 481 | The options allow you to add or remove included and excluded expressions. |
|
483 | 482 | |
|
484 | 483 | If --clear is specified, then all previous includes and excludes are DROPPED |
|
485 | 484 | and replaced by the new ones specified to --addinclude and --addexclude. |
|
486 | 485 | If --clear is specified without any further options, the narrowspec will be |
|
487 | 486 | empty and will not match any files. |
|
488 | 487 | |
|
489 | 488 | If --auto-remove-includes is specified, then those includes that don't match |
|
490 | 489 | any files modified by currently visible local commits (those not shared by |
|
491 | 490 | the remote) will be added to the set of explicitly specified includes to |
|
492 | 491 | remove. |
|
493 | 492 | |
|
494 | 493 | --import-rules accepts a path to a file containing rules, allowing you to |
|
495 | 494 | add --addinclude, --addexclude rules in bulk. Like the other include and |
|
496 | 495 | exclude switches, the changes are applied immediately. |
|
497 | 496 | """ |
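
Since such files are fed through sparse.parseconfig in 'narrow' mode (see the import-rules handling below), a rules file is a sectioned list of patterns; note that '%include' directives are explicitly rejected. A hypothetical example, with invented paths:

    [include]
    path:src/lib
    path:docs
    [exclude]
    path:src/lib/tests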
|
498 | 497 | opts = pycompat.byteskwargs(opts) |
|
499 | 498 | if requirements.NARROW_REQUIREMENT not in repo.requirements: |
|
500 | 499 | raise error.InputError( |
|
501 | 500 | _( |
|
502 | 501 | b'the tracked command is only supported on ' |
|
503 | 502 | b'repositories cloned with --narrow' |
|
504 | 503 | ) |
|
505 | 504 | ) |
|
506 | 505 | |
|
507 | 506 | # Before supporting it, decide whether "hg tracked --clear" should mean
|
508 | 507 | # tracking no paths or all paths. |
|
509 | 508 | if opts[b'clear']: |
|
510 | 509 | raise error.InputError(_(b'the --clear option is not yet supported')) |
|
511 | 510 | |
|
512 | 511 | # import rules from a file |
|
513 | 512 | newrules = opts.get(b'import_rules') |
|
514 | 513 | if newrules: |
|
515 | 514 | try: |
|
516 | 515 | filepath = os.path.join(encoding.getcwd(), newrules) |
|
517 | 516 | fdata = util.readfile(filepath) |
|
518 | 517 | except IOError as inst: |
|
519 | 518 | raise error.StorageError( |
|
520 | 519 | _(b"cannot read narrowspecs from '%s': %s") |
|
521 | 520 | % (filepath, encoding.strtolocal(inst.strerror)) |
|
522 | 521 | ) |
|
523 | 522 | includepats, excludepats, profiles = sparse.parseconfig( |
|
524 | 523 | ui, fdata, b'narrow' |
|
525 | 524 | ) |
|
526 | 525 | if profiles: |
|
527 | 526 | raise error.InputError( |
|
528 | 527 | _( |
|
529 | 528 | b"including other spec files using '%include' " |
|
530 | 529 | b"is not supported in narrowspec" |
|
531 | 530 | ) |
|
532 | 531 | ) |
|
533 | 532 | opts[b'addinclude'].extend(includepats) |
|
534 | 533 | opts[b'addexclude'].extend(excludepats) |
|
535 | 534 | |
|
536 | 535 | addedincludes = narrowspec.parsepatterns(opts[b'addinclude']) |
|
537 | 536 | removedincludes = narrowspec.parsepatterns(opts[b'removeinclude']) |
|
538 | 537 | addedexcludes = narrowspec.parsepatterns(opts[b'addexclude']) |
|
539 | 538 | removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude']) |
|
540 | 539 | autoremoveincludes = opts[b'auto_remove_includes'] |
|
541 | 540 | |
|
542 | 541 | update_working_copy = opts[b'update_working_copy'] |
|
543 | 542 | only_show = not ( |
|
544 | 543 | addedincludes |
|
545 | 544 | or removedincludes |
|
546 | 545 | or addedexcludes |
|
547 | 546 | or removedexcludes |
|
548 | 547 | or newrules |
|
549 | 548 | or autoremoveincludes |
|
550 | 549 | or update_working_copy |
|
551 | 550 | ) |
|
552 | 551 | |
|
553 | 552 | oldincludes, oldexcludes = repo.narrowpats |
|
554 | 553 | |
|
555 | 554 | # filter the user-passed additions and deletions into actual additions

556 | 555 | # and deletions of includes and excludes
|
557 | 556 | addedincludes -= oldincludes |
|
558 | 557 | removedincludes &= oldincludes |
|
559 | 558 | addedexcludes -= oldexcludes |
|
560 | 559 | removedexcludes &= oldexcludes |
|
561 | 560 | |
|
562 | 561 | widening = addedincludes or removedexcludes |
|
563 | 562 | narrowing = removedincludes or addedexcludes |
|
564 | 563 | |
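
A toy run of the normalization above, with invented patterns, showing how the set arithmetic discards no-op requests before the widening/narrowing flags are computed:

    oldincludes = {b'path:src', b'path:docs'}

    addedincludes   = {b'path:src', b'path:tools'}   # one is already tracked
    removedincludes = {b'path:docs', b'path:junk'}   # one was never included

    addedincludes  -= oldincludes     # {b'path:tools'}: genuinely new
    removedincludes &= oldincludes    # {b'path:docs'}: actually removable

    widening  = bool(addedincludes)   # True -> fetch more history
    narrowing = bool(removedincludes) # True -> strip/delete local data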
|
565 | 564 | # Only print the current narrowspec. |
|
566 | 565 | if only_show: |
|
567 | 566 | ui.pager(b'tracked') |
|
568 | 567 | fm = ui.formatter(b'narrow', opts) |
|
569 | 568 | for i in sorted(oldincludes): |
|
570 | 569 | fm.startitem() |
|
571 | 570 | fm.write(b'status', b'%s ', b'I', label=b'narrow.included') |
|
572 | 571 | fm.write(b'pat', b'%s\n', i, label=b'narrow.included') |
|
573 | 572 | for i in sorted(oldexcludes): |
|
574 | 573 | fm.startitem() |
|
575 | 574 | fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded') |
|
576 | 575 | fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded') |
|
577 | 576 | fm.end() |
|
578 | 577 | return 0 |
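
Concretely, for a hypothetical repository tracking one include and one exclude, the plain-formatter output of the branch above would look like:

    I path:src/lib
    X path:src/lib/tests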
|
579 | 578 | |
|
580 | 579 | if update_working_copy: |
|
581 | 580 | with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'): |
|
582 | 581 | narrowspec.updateworkingcopy(repo) |
|
583 | 582 | narrowspec.copytoworkingcopy(repo) |
|
584 | 583 | return 0 |
|
585 | 584 | |
|
586 | 585 | if not (widening or narrowing or autoremoveincludes): |
|
587 | 586 | ui.status(_(b"nothing to widen or narrow\n")) |
|
588 | 587 | return 0 |
|
589 | 588 | |
|
590 | 589 | with repo.wlock(), repo.lock(): |
|
591 | 590 | cmdutil.bailifchanged(repo) |
|
592 | 591 | |
|
593 | 592 | # Find the revisions we have in common with the remote. These will |
|
594 | 593 | # be used for finding local-only changes for narrowing. They will |
|
595 | 594 | # also define the set of revisions to update for widening. |
|
596 | 595 | r = urlutil.get_unique_pull_path(b'tracked', repo, ui, remotepath) |
|
597 | 596 | url, branches = r |
|
598 | 597 | ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url)) |
|
599 | 598 | remote = hg.peer(repo, opts, url) |
|
600 | 599 | |
|
601 | 600 | try: |
|
602 | 601 | # check narrow support before doing anything if widening needs to be

603 | 602 | # performed. In the future we should also abort if the client is

604 | 603 | # ellipses and the server does not support ellipses
|
605 | 604 | if ( |
|
606 | 605 | widening |
|
607 | 606 | and wireprototypes.NARROWCAP not in remote.capabilities() |
|
608 | 607 | ): |
|
609 | 608 | raise error.Abort(_(b"server does not support narrow clones")) |
|
610 | 609 | |
|
611 | 610 | commoninc = discovery.findcommonincoming(repo, remote) |
|
612 | 611 | |
|
613 | 612 | if autoremoveincludes: |
|
614 | 613 | outgoing = discovery.findcommonoutgoing( |
|
615 | 614 | repo, remote, commoninc=commoninc |
|
616 | 615 | ) |
|
617 | 616 | ui.status(_(b'looking for unused includes to remove\n')) |
|
618 | 617 | localfiles = set() |
|
619 | 618 | for n in itertools.chain(outgoing.missing, outgoing.excluded): |
|
620 | 619 | localfiles.update(repo[n].files()) |
|
621 | 620 | suggestedremovals = [] |
|
622 | 621 | for include in sorted(oldincludes): |
|
623 | 622 | match = narrowspec.match(repo.root, [include], oldexcludes) |
|
624 | 623 | if not any(match(f) for f in localfiles): |
|
625 | 624 | suggestedremovals.append(include) |
|
626 | 625 | if suggestedremovals: |
|
627 | 626 | for s in suggestedremovals: |
|
628 | 627 | ui.status(b'%s\n' % s) |
|
629 | 628 | if ( |
|
630 | 629 | ui.promptchoice( |
|
631 | 630 | _( |
|
632 | 631 | b'remove these unused includes (yn)?' |
|
633 | 632 | b'$$ &Yes $$ &No' |
|
634 | 633 | ) |
|
635 | 634 | ) |
|
636 | 635 | == 0 |
|
637 | 636 | ): |
|
638 | 637 | removedincludes.update(suggestedremovals) |
|
639 | 638 | narrowing = True |
|
640 | 639 | else: |
|
641 | 640 | ui.status(_(b'found no unused includes\n')) |
|
642 | 641 | |
|
643 | 642 | if narrowing: |
|
644 | 643 | newincludes = oldincludes - removedincludes |
|
645 | 644 | newexcludes = oldexcludes | addedexcludes |
|
646 | 645 | _narrow( |
|
647 | 646 | ui, |
|
648 | 647 | repo, |
|
649 | 648 | remote, |
|
650 | 649 | commoninc, |
|
651 | 650 | oldincludes, |
|
652 | 651 | oldexcludes, |
|
653 | 652 | newincludes, |
|
654 | 653 | newexcludes, |
|
655 | 654 | opts[b'force_delete_local_changes'], |
|
656 | 655 | opts[b'backup'], |
|
657 | 656 | ) |
|
658 | 657 | # _narrow() updated the narrowspec and _widen() below needs to |
|
659 | 658 | # use the updated values as its base (otherwise removed includes |
|
660 | 659 | # and addedexcludes will be lost in the resulting narrowspec) |
|
661 | 660 | oldincludes = newincludes |
|
662 | 661 | oldexcludes = newexcludes |
|
663 | 662 | |
|
664 | 663 | if widening: |
|
665 | 664 | newincludes = oldincludes | addedincludes |
|
666 | 665 | newexcludes = oldexcludes - removedexcludes |
|
667 | 666 | _widen( |
|
668 | 667 | ui, |
|
669 | 668 | repo, |
|
670 | 669 | remote, |
|
671 | 670 | commoninc, |
|
672 | 671 | oldincludes, |
|
673 | 672 | oldexcludes, |
|
674 | 673 | newincludes, |
|
675 | 674 | newexcludes, |
|
676 | 675 | ) |
|
677 | 676 | finally: |
|
678 | 677 | remote.close() |
|
679 | 678 | |
|
680 | 679 | return 0 |
|