@@ -1,2640 +1,2639 b''
|
1 | 1 | # histedit.py - interactive history editing for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2009 Augie Fackler <raf@durin42.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """interactive history editing |
|
8 | 8 | |
|
9 | 9 | With this extension installed, Mercurial gains one new command: histedit. Usage |
|
10 | 10 | is as follows, assuming the following history:: |
|
11 | 11 | |
|
12 | 12 | @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42 |
|
13 | 13 | | Add delta |
|
14 | 14 | | |
|
15 | 15 | o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42 |
|
16 | 16 | | Add gamma |
|
17 | 17 | | |
|
18 | 18 | o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42 |
|
19 | 19 | | Add beta |
|
20 | 20 | | |
|
21 | 21 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
22 | 22 | Add alpha |
|
23 | 23 | |
|
24 | 24 | If you were to run ``hg histedit c561b4e977df``, you would see the following |
|
25 | 25 | file open in your editor:: |
|
26 | 26 | |
|
27 | 27 | pick c561b4e977df Add beta |
|
28 | 28 | pick 030b686bedc4 Add gamma |
|
29 | 29 | pick 7c2fd3b9020c Add delta |
|
30 | 30 | |
|
31 | 31 | # Edit history between c561b4e977df and 7c2fd3b9020c |
|
32 | 32 | # |
|
33 | 33 | # Commits are listed from least to most recent |
|
34 | 34 | # |
|
35 | 35 | # Commands: |
|
36 | 36 | # p, pick = use commit |
|
37 | 37 | # e, edit = use commit, but stop for amending |
|
38 | 38 | # f, fold = use commit, but combine it with the one above |
|
39 | 39 | # r, roll = like fold, but discard this commit's description and date |
|
40 | 40 | # d, drop = remove commit from history |
|
41 | 41 | # m, mess = edit commit message without changing commit content |
|
42 | 42 | # b, base = checkout changeset and apply further changesets from there |
|
43 | 43 | # |
|
44 | 44 | |
|
45 | 45 | In this file, lines beginning with ``#`` are ignored. You must specify a rule |
|
46 | 46 | for each revision in your history. For example, if you had meant to add gamma |
|
47 | 47 | before beta, and then wanted to add delta in the same revision as beta, you |
|
48 | 48 | would reorganize the file to look like this:: |
|
49 | 49 | |
|
50 | 50 | pick 030b686bedc4 Add gamma |
|
51 | 51 | pick c561b4e977df Add beta |
|
52 | 52 | fold 7c2fd3b9020c Add delta |
|
53 | 53 | |
|
54 | 54 | # Edit history between c561b4e977df and 7c2fd3b9020c |
|
55 | 55 | # |
|
56 | 56 | # Commits are listed from least to most recent |
|
57 | 57 | # |
|
58 | 58 | # Commands: |
|
59 | 59 | # p, pick = use commit |
|
60 | 60 | # e, edit = use commit, but stop for amending |
|
61 | 61 | # f, fold = use commit, but combine it with the one above |
|
62 | 62 | # r, roll = like fold, but discard this commit's description and date |
|
63 | 63 | # d, drop = remove commit from history |
|
64 | 64 | # m, mess = edit commit message without changing commit content |
|
65 | 65 | # b, base = checkout changeset and apply further changesets from there |
|
66 | 66 | # |
|
67 | 67 | |
|
68 | 68 | At which point you close the editor and ``histedit`` starts working. When you |
|
69 | 69 | specify a ``fold`` operation, ``histedit`` will open an editor when it folds |
|
70 | 70 | those revisions together, offering you a chance to clean up the commit message:: |
|
71 | 71 | |
|
72 | 72 | Add beta |
|
73 | 73 | *** |
|
74 | 74 | Add delta |
|
75 | 75 | |
|
76 | 76 | Edit the commit message to your liking, then close the editor. The date used |
|
77 | 77 | for the commit will be the later of the two commits' dates. For this example, |
|
78 | 78 | let's assume that the commit message was changed to ``Add beta and delta.`` |
|
79 | 79 | After histedit has run and had a chance to remove any old or temporary |
|
80 | 80 | revisions it needed, the history looks like this:: |
|
81 | 81 | |
|
82 | 82 | @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42 |
|
83 | 83 | | Add beta and delta. |
|
84 | 84 | | |
|
85 | 85 | o 1 081603921c3f 2009-04-27 18:04 -0500 durin42 |
|
86 | 86 | | Add gamma |
|
87 | 87 | | |
|
88 | 88 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
89 | 89 | Add alpha |
|
90 | 90 | |
|
91 | 91 | Note that ``histedit`` does *not* remove any revisions (even its own temporary |
|
92 | 92 | ones) until after it has completed all the editing operations, so it will |
|
93 | 93 | probably perform several strip operations when it's done. For the above example, |
|
94 | 94 | it had to run strip twice. Strip can be slow depending on a variety of factors, |
|
95 | 95 | so you might need to be a little patient. You can choose to keep the original |
|
96 | 96 | revisions by passing the ``--keep`` flag. |
|
97 | 97 | |
|
98 | 98 | The ``edit`` operation will drop you back to a command prompt, |
|
99 | 99 | allowing you to edit files freely, or even use ``hg record`` to commit |
|
100 | 100 | some changes as a separate commit. When you're done, any remaining |
|
101 | 101 | uncommitted changes will be committed as well. When done, run ``hg |
|
102 | 102 | histedit --continue`` to finish this step. If there are uncommitted |
|
103 | 103 | changes, you'll be prompted for a new commit message, but the default |
|
104 | 104 | commit message will be the original message for the ``edit``-ed |
|
105 | 105 | revision, and the date of the original commit will be preserved. |
|
106 | 106 | |
|
107 | 107 | The ``message`` operation will give you a chance to revise a commit |
|
108 | 108 | message without changing the contents. It's a shortcut for doing |
|
109 | 109 | ``edit`` immediately followed by ``hg histedit --continue``. |
|
110 | 110 | |
|
111 | 111 | If ``histedit`` encounters a conflict when moving a revision (while |
|
112 | 112 | handling ``pick`` or ``fold``), it'll stop in a similar manner to |
|
113 | 113 | ``edit`` with the difference that it won't prompt you for a commit |
|
114 | 114 | message when done. If you decide at this point that you don't like how |
|
115 | 115 | much work it will be to rearrange history, or that you made a mistake, |
|
116 | 116 | you can use ``hg histedit --abort`` to abandon the new changes you |
|
117 | 117 | have made and return to the state before you attempted to edit your |
|
118 | 118 | history. |
|
119 | 119 | |
|
120 | 120 | If we clone the histedit-ed example repository above and add four more |
|
121 | 121 | changes, such that we have the following history:: |
|
122 | 122 | |
|
123 | 123 | @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan |
|
124 | 124 | | Add theta |
|
125 | 125 | | |
|
126 | 126 | o 5 140988835471 2009-04-27 18:04 -0500 stefan |
|
127 | 127 | | Add eta |
|
128 | 128 | | |
|
129 | 129 | o 4 122930637314 2009-04-27 18:04 -0500 stefan |
|
130 | 130 | | Add zeta |
|
131 | 131 | | |
|
132 | 132 | o 3 836302820282 2009-04-27 18:04 -0500 stefan |
|
133 | 133 | | Add epsilon |
|
134 | 134 | | |
|
135 | 135 | o 2 989b4d060121 2009-04-27 18:04 -0500 durin42 |
|
136 | 136 | | Add beta and delta. |
|
137 | 137 | | |
|
138 | 138 | o 1 081603921c3f 2009-04-27 18:04 -0500 durin42 |
|
139 | 139 | | Add gamma |
|
140 | 140 | | |
|
141 | 141 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
142 | 142 | Add alpha |
|
143 | 143 | |
|
144 | 144 | If you run ``hg histedit --outgoing`` on the clone then it is the same |
|
145 | 145 | as running ``hg histedit 836302820282``. If you plan to push to a |
|
146 | 146 | repository that Mercurial does not detect to be related to the source |
|
147 | 147 | repo, you can add a ``--force`` option. |
|
148 | 148 | |
|
149 | 149 | Config |
|
150 | 150 | ------ |
|
151 | 151 | |
|
152 | 152 | Histedit rule lines are truncated to 80 characters by default. You |
|
153 | 153 | can customize this behavior by setting a different length in your |
|
154 | 154 | configuration file:: |
|
155 | 155 | |
|
156 | 156 | [histedit] |
|
157 | 157 | linelen = 120 # truncate rule lines at 120 characters |
|
158 | 158 | |
|
159 | 159 | The summary of a change can be customized as well:: |
|
160 | 160 | |
|
161 | 161 | [histedit] |
|
162 | 162 | summary-template = '{rev} {bookmarks} {desc|firstline}' |
|
163 | 163 | |
|
164 | 164 | The customized summary should be kept short enough that rule lines |
|
165 | 165 | will fit in the configured line length. See above if that requires |
|
166 | 166 | customization. |
|
167 | 167 | |
|
168 | 168 | ``hg histedit`` attempts to automatically choose an appropriate base |
|
169 | 169 | revision to use. To change which base revision is used, define a |
|
170 | 170 | revset in your configuration file:: |
|
171 | 171 | |
|
172 | 172 | [histedit] |
|
173 | 173 | defaultrev = only(.) & draft() |
|
174 | 174 | |
|
175 | 175 | By default each edited revision needs to be present in the histedit commands. |
|
176 | 176 | To remove a revision, use the ``drop`` operation. You can configure |
|
177 | 177 | the drop to be implicit for missing commits by adding:: |
|
178 | 178 | |
|
179 | 179 | [histedit] |
|
180 | 180 | dropmissing = True |
|
181 | 181 | |
|
182 | 182 | By default, histedit will close the transaction after each action. For |
|
183 | 183 | performance purposes, you can configure histedit to use a single transaction |
|
184 | 184 | across the entire histedit. WARNING: This setting introduces a significant risk |
|
185 | 185 | of losing the work you've done in a histedit if the histedit aborts |
|
186 | 186 | unexpectedly:: |
|
187 | 187 | |
|
188 | 188 | [histedit] |
|
189 | 189 | singletransaction = True |
|
190 | 190 | |
|
191 | 191 | """ |
|
192 | 192 | |
|
193 | 193 | from __future__ import absolute_import |
|
194 | 194 | |
|
195 | 195 | # chistedit dependencies that are not available everywhere |
|
196 | 196 | try: |
|
197 | 197 | import fcntl |
|
198 | 198 | import termios |
|
199 | 199 | except ImportError: |
|
200 | 200 | fcntl = None |
|
201 | 201 | termios = None |
|
202 | 202 | |
|
203 | 203 | import functools |
|
204 | 204 | import os |
|
205 | 205 | import struct |
|
206 | 206 | |
|
207 | 207 | from mercurial.i18n import _ |
|
208 | 208 | from mercurial.pycompat import ( |
|
209 | 209 | getattr, |
|
210 | 210 | open, |
|
211 | 211 | ) |
|
212 | 212 | from mercurial import ( |
|
213 | 213 | bundle2, |
|
214 | 214 | cmdutil, |
|
215 | 215 | context, |
|
216 | 216 | copies, |
|
217 | 217 | destutil, |
|
218 | 218 | discovery, |
|
219 | 219 | encoding, |
|
220 | 220 | error, |
|
221 | 221 | exchange, |
|
222 | 222 | extensions, |
|
223 | 223 | hg, |
|
224 | 224 | logcmdutil, |
|
225 | 225 | merge as mergemod, |
|
226 | 226 | mergestate as mergestatemod, |
|
227 | 227 | mergeutil, |
|
228 | 228 | node, |
|
229 | 229 | obsolete, |
|
230 | 230 | pycompat, |
|
231 | 231 | registrar, |
|
232 | 232 | repair, |
|
233 | 233 | rewriteutil, |
|
234 | 234 | scmutil, |
|
235 | 235 | state as statemod, |
|
236 | 236 | util, |
|
237 | 237 | ) |
|
238 | 238 | from mercurial.utils import ( |
|
239 | 239 | dateutil, |
|
240 | 240 | stringutil, |
|
241 | 241 | ) |
|
242 | 242 | |
|
243 | 243 | pickle = util.pickle |
|
244 | 244 | cmdtable = {} |
|
245 | 245 | command = registrar.command(cmdtable) |
|
246 | 246 | |
|
247 | 247 | configtable = {} |
|
248 | 248 | configitem = registrar.configitem(configtable) |
|
249 | 249 | configitem( |
|
250 | 250 | b'experimental', b'histedit.autoverb', default=False, |
|
251 | 251 | ) |
|
252 | 252 | configitem( |
|
253 | 253 | b'histedit', b'defaultrev', default=None, |
|
254 | 254 | ) |
|
255 | 255 | configitem( |
|
256 | 256 | b'histedit', b'dropmissing', default=False, |
|
257 | 257 | ) |
|
258 | 258 | configitem( |
|
259 | 259 | b'histedit', b'linelen', default=80, |
|
260 | 260 | ) |
|
261 | 261 | configitem( |
|
262 | 262 | b'histedit', b'singletransaction', default=False, |
|
263 | 263 | ) |
|
264 | 264 | configitem( |
|
265 | 265 | b'ui', b'interface.histedit', default=None, |
|
266 | 266 | ) |
|
267 | 267 | configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}') |
|
268 | 268 | |
|
269 | 269 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
270 | 270 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
271 | 271 | # be specifying the version(s) of Mercurial they are tested with, or |
|
272 | 272 | # leave the attribute unspecified. |
|
273 | 273 | testedwith = b'ships-with-hg-core' |
|
274 | 274 | |
|
275 | 275 | actiontable = {} |
|
276 | 276 | primaryactions = set() |
|
277 | 277 | secondaryactions = set() |
|
278 | 278 | tertiaryactions = set() |
|
279 | 279 | internalactions = set() |
|
280 | 280 | |
|
281 | 281 | |
|
282 | 282 | def geteditcomment(ui, first, last): |
|
283 | 283 | """ construct the editor comment |
|
284 | 284 | The comment includes:: |
|
285 | 285 | - an intro |
|
286 | 286 | - sorted primary commands |
|
287 | 287 | - sorted short commands |
|
288 | 288 | - sorted long commands |
|
289 | 289 | - additional hints |
|
290 | 290 | |
|
291 | 291 | Commands are only included once. |
|
292 | 292 | """ |
|
293 | 293 | intro = _( |
|
294 | 294 | b"""Edit history between %s and %s |
|
295 | 295 | |
|
296 | 296 | Commits are listed from least to most recent |
|
297 | 297 | |
|
298 | 298 | You can reorder changesets by reordering the lines |
|
299 | 299 | |
|
300 | 300 | Commands: |
|
301 | 301 | """ |
|
302 | 302 | ) |
|
303 | 303 | actions = [] |
|
304 | 304 | |
|
305 | 305 | def addverb(v): |
|
306 | 306 | a = actiontable[v] |
|
307 | 307 | lines = a.message.split(b"\n") |
|
308 | 308 | if len(a.verbs): |
|
309 | 309 | v = b', '.join(sorted(a.verbs, key=lambda v: len(v))) |
|
310 | 310 | actions.append(b" %s = %s" % (v, lines[0])) |
|
311 | 311 | actions.extend([b' %s' % l for l in lines[1:]]) |
|
312 | 312 | |
|
313 | 313 | for v in ( |
|
314 | 314 | sorted(primaryactions) |
|
315 | 315 | + sorted(secondaryactions) |
|
316 | 316 | + sorted(tertiaryactions) |
|
317 | 317 | ): |
|
318 | 318 | addverb(v) |
|
319 | 319 | actions.append(b'') |
|
320 | 320 | |
|
321 | 321 | hints = [] |
|
322 | 322 | if ui.configbool(b'histedit', b'dropmissing'): |
|
323 | 323 | hints.append( |
|
324 | 324 | b"Deleting a changeset from the list " |
|
325 | 325 | b"will DISCARD it from the edited history!" |
|
326 | 326 | ) |
|
327 | 327 | |
|
328 | 328 | lines = (intro % (first, last)).split(b'\n') + actions + hints |
|
329 | 329 | |
|
330 | 330 | return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines]) |
|
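A minimal standalone sketch of the formatting trick above (the sample lines are hypothetical): every non-empty line gains a ``# `` prefix, and empty lines become a bare ``#``::

    lines = [b"Edit history between aaa and bbb", b"", b"Commands:"]
    comment = b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
    print(comment.decode())
    # -> '# Edit history between aaa and bbb\n#\n# Commands:\n'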
331 | 331 | |
|
332 | 332 | |
|
333 | 333 | class histeditstate(object): |
|
334 | 334 | def __init__(self, repo): |
|
335 | 335 | self.repo = repo |
|
336 | 336 | self.actions = None |
|
337 | 337 | self.keep = None |
|
338 | 338 | self.topmost = None |
|
339 | 339 | self.parentctxnode = None |
|
340 | 340 | self.lock = None |
|
341 | 341 | self.wlock = None |
|
342 | 342 | self.backupfile = None |
|
343 | 343 | self.stateobj = statemod.cmdstate(repo, b'histedit-state') |
|
344 | 344 | self.replacements = [] |
|
345 | 345 | |
|
346 | 346 | def read(self): |
|
347 | 347 | """Load histedit state from disk and set fields appropriately.""" |
|
348 | 348 | if not self.stateobj.exists(): |
|
349 | 349 | cmdutil.wrongtooltocontinue(self.repo, _(b'histedit')) |
|
350 | 350 | |
|
351 | 351 | data = self._read() |
|
352 | 352 | |
|
353 | 353 | self.parentctxnode = data[b'parentctxnode'] |
|
354 | 354 | actions = parserules(data[b'rules'], self) |
|
355 | 355 | self.actions = actions |
|
356 | 356 | self.keep = data[b'keep'] |
|
357 | 357 | self.topmost = data[b'topmost'] |
|
358 | 358 | self.replacements = data[b'replacements'] |
|
359 | 359 | self.backupfile = data[b'backupfile'] |
|
360 | 360 | |
|
361 | 361 | def _read(self): |
|
362 | 362 | fp = self.repo.vfs.read(b'histedit-state') |
|
363 | 363 | if fp.startswith(b'v1\n'): |
|
364 | 364 | data = self._load() |
|
365 | 365 | parentctxnode, rules, keep, topmost, replacements, backupfile = data |
|
366 | 366 | else: |
|
367 | 367 | data = pickle.loads(fp) |
|
368 | 368 | parentctxnode, rules, keep, topmost, replacements = data |
|
369 | 369 | backupfile = None |
|
370 | 370 | rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules]) |
|
371 | 371 | |
|
372 | 372 | return { |
|
373 | 373 | b'parentctxnode': parentctxnode, |
|
374 | 374 | b"rules": rules, |
|
375 | 375 | b"keep": keep, |
|
376 | 376 | b"topmost": topmost, |
|
377 | 377 | b"replacements": replacements, |
|
378 | 378 | b"backupfile": backupfile, |
|
379 | 379 | } |
|
380 | 380 | |
|
381 | 381 | def write(self, tr=None): |
|
382 | 382 | if tr: |
|
383 | 383 | tr.addfilegenerator( |
|
384 | 384 | b'histedit-state', |
|
385 | 385 | (b'histedit-state',), |
|
386 | 386 | self._write, |
|
387 | 387 | location=b'plain', |
|
388 | 388 | ) |
|
389 | 389 | else: |
|
390 | 390 | with self.repo.vfs(b"histedit-state", b"w") as f: |
|
391 | 391 | self._write(f) |
|
392 | 392 | |
|
393 | 393 | def _write(self, fp): |
|
394 | 394 | fp.write(b'v1\n') |
|
395 | 395 | fp.write(b'%s\n' % node.hex(self.parentctxnode)) |
|
396 | 396 | fp.write(b'%s\n' % node.hex(self.topmost)) |
|
397 | 397 | fp.write(b'%s\n' % (b'True' if self.keep else b'False')) |
|
398 | 398 | fp.write(b'%d\n' % len(self.actions)) |
|
399 | 399 | for action in self.actions: |
|
400 | 400 | fp.write(b'%s\n' % action.tostate()) |
|
401 | 401 | fp.write(b'%d\n' % len(self.replacements)) |
|
402 | 402 | for replacement in self.replacements: |
|
403 | 403 | fp.write( |
|
404 | 404 | b'%s%s\n' |
|
405 | 405 | % ( |
|
406 | 406 | node.hex(replacement[0]), |
|
407 | 407 | b''.join(node.hex(r) for r in replacement[1]), |
|
408 | 408 | ) |
|
409 | 409 | ) |
|
410 | 410 | backupfile = self.backupfile |
|
411 | 411 | if not backupfile: |
|
412 | 412 | backupfile = b'' |
|
413 | 413 | fp.write(b'%s\n' % backupfile) |
|
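For reference, a hand-written sketch of the v1 on-disk layout that ``_write`` produces (all values hypothetical): version marker, parent and topmost nodes, the keep flag, a counted list of verb/node pairs from ``tostate``, a counted list of replacement lines, then the backup file name::

    v1
    <40-hex parentctxnode>
    <40-hex topmost>
    False
    2
    pick
    <40-hex node>
    mess
    <40-hex node>
    1
    <40-hex original><40-hex successor(s), concatenated>
    <backup file name, possibly empty>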
414 | 414 | |
|
415 | 415 | def _load(self): |
|
416 | 416 | fp = self.repo.vfs(b'histedit-state', b'r') |
|
417 | 417 | lines = [l[:-1] for l in fp.readlines()] |
|
418 | 418 | |
|
419 | 419 | index = 0 |
|
420 | 420 | lines[index] # version number |
|
421 | 421 | index += 1 |
|
422 | 422 | |
|
423 | 423 | parentctxnode = node.bin(lines[index]) |
|
424 | 424 | index += 1 |
|
425 | 425 | |
|
426 | 426 | topmost = node.bin(lines[index]) |
|
427 | 427 | index += 1 |
|
428 | 428 | |
|
429 | 429 | keep = lines[index] == b'True' |
|
430 | 430 | index += 1 |
|
431 | 431 | |
|
432 | 432 | # Rules |
|
433 | 433 | rules = [] |
|
434 | 434 | rulelen = int(lines[index]) |
|
435 | 435 | index += 1 |
|
436 | 436 | for i in pycompat.xrange(rulelen): |
|
437 | 437 | ruleaction = lines[index] |
|
438 | 438 | index += 1 |
|
439 | 439 | rule = lines[index] |
|
440 | 440 | index += 1 |
|
441 | 441 | rules.append((ruleaction, rule)) |
|
442 | 442 | |
|
443 | 443 | # Replacements |
|
444 | 444 | replacements = [] |
|
445 | 445 | replacementlen = int(lines[index]) |
|
446 | 446 | index += 1 |
|
447 | 447 | for i in pycompat.xrange(replacementlen): |
|
448 | 448 | replacement = lines[index] |
|
449 | 449 | original = node.bin(replacement[:40]) |
|
450 | 450 | succ = [ |
|
451 | 451 | node.bin(replacement[i : i + 40]) |
|
452 | 452 | for i in range(40, len(replacement), 40) |
|
453 | 453 | ] |
|
454 | 454 | replacements.append((original, succ)) |
|
455 | 455 | index += 1 |
|
456 | 456 | |
|
457 | 457 | backupfile = lines[index] |
|
458 | 458 | index += 1 |
|
459 | 459 | |
|
460 | 460 | fp.close() |
|
461 | 461 | |
|
462 | 462 | return parentctxnode, rules, keep, topmost, replacements, backupfile |
|
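A standalone sketch of the replacement-line decoding above, using fabricated 40-character digests (the ``node.bin`` conversion is omitted): the first 40 hex characters are the original node, and each further 40-character slice is one successor::

    replacement = b'a' * 40 + b'b' * 40 + b'c' * 40
    original = replacement[:40]
    succ = [replacement[i : i + 40] for i in range(40, len(replacement), 40)]
    assert original == b'a' * 40
    assert succ == [b'b' * 40, b'c' * 40]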
463 | 463 | |
|
464 | 464 | def clear(self): |
|
465 | 465 | if self.inprogress(): |
|
466 | 466 | self.repo.vfs.unlink(b'histedit-state') |
|
467 | 467 | |
|
468 | 468 | def inprogress(self): |
|
469 | 469 | return self.repo.vfs.exists(b'histedit-state') |
|
470 | 470 | |
|
471 | 471 | |
|
472 | 472 | class histeditaction(object): |
|
473 | 473 | def __init__(self, state, node): |
|
474 | 474 | self.state = state |
|
475 | 475 | self.repo = state.repo |
|
476 | 476 | self.node = node |
|
477 | 477 | |
|
478 | 478 | @classmethod |
|
479 | 479 | def fromrule(cls, state, rule): |
|
480 | 480 | """Parses the given rule, returning an instance of the histeditaction. |
|
481 | 481 | """ |
|
482 | 482 | ruleid = rule.strip().split(b' ', 1)[0] |
|
483 | 483 | # ruleid can be anything from rev numbers, hashes, "bookmarks" etc |
|
484 | 484 | # Validate the rule id and resolve it to the changeset hash |
|
485 | 485 | try: |
|
486 | 486 | rev = node.bin(ruleid) |
|
487 | 487 | except TypeError: |
|
488 | 488 | try: |
|
489 | 489 | _ctx = scmutil.revsingle(state.repo, ruleid) |
|
490 | 490 | rulehash = _ctx.hex() |
|
491 | 491 | rev = node.bin(rulehash) |
|
492 | 492 | except error.RepoLookupError: |
|
493 | 493 | raise error.ParseError(_(b"invalid changeset %s") % ruleid) |
|
494 | 494 | return cls(state, rev) |
|
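A standalone sketch of the identifier extraction above (``parserules``, elsewhere in this file, strips the verb before calling ``fromrule``; the hash is hypothetical)::

    rule = b'c561b4e977df Add beta'   # what fromrule receives after the verb
    ruleid = rule.strip().split(b' ', 1)[0]
    assert ruleid == b'c561b4e977df'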
495 | 495 | |
|
496 | 496 | def verify(self, prev, expected, seen): |
|
497 | 497 | """ Verifies semantic correctness of the rule""" |
|
498 | 498 | repo = self.repo |
|
499 | 499 | ha = node.hex(self.node) |
|
500 | 500 | self.node = scmutil.resolvehexnodeidprefix(repo, ha) |
|
501 | 501 | if self.node is None: |
|
502 | 502 | raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12]) |
|
503 | 503 | self._verifynodeconstraints(prev, expected, seen) |
|
504 | 504 | |
|
505 | 505 | def _verifynodeconstraints(self, prev, expected, seen): |
|
506 | 506 | # by default, a command needs a node in the edited list |
|
507 | 507 | if self.node not in expected: |
|
508 | 508 | raise error.ParseError( |
|
509 | 509 | _(b'%s "%s" changeset was not a candidate') |
|
510 | 510 | % (self.verb, node.short(self.node)), |
|
511 | 511 | hint=_(b'only use listed changesets'), |
|
512 | 512 | ) |
|
513 | 513 | # and only one command per node |
|
514 | 514 | if self.node in seen: |
|
515 | 515 | raise error.ParseError( |
|
516 | 516 | _(b'duplicated command for changeset %s') |
|
517 | 517 | % node.short(self.node) |
|
518 | 518 | ) |
|
519 | 519 | |
|
520 | 520 | def torule(self): |
|
521 | 521 | """build a histedit rule line for an action |
|
522 | 522 | |
|
523 | 523 | by default lines are in the form: |
|
524 | 524 | <verb> <hash> <summary> |
|
525 | 525 | """ |
|
526 | 526 | ctx = self.repo[self.node] |
|
527 | 527 | ui = self.repo.ui |
|
528 | 528 | summary = ( |
|
529 | 529 | cmdutil.rendertemplate( |
|
530 | 530 | ctx, ui.config(b'histedit', b'summary-template') |
|
531 | 531 | ) |
|
532 | 532 | or b'' |
|
533 | 533 | ) |
|
534 | 534 | summary = summary.splitlines()[0] |
|
535 | 535 | line = b'%s %s %s' % (self.verb, ctx, summary) |
|
536 | 536 | # trim to the configured line length (80 columns by default) so the |
|
537 | 537 | # rule line is not overly wide; roughly 5 of those columns go to the verb |
|
538 | 538 | maxlen = self.repo.ui.configint(b'histedit', b'linelen') |
|
539 | 539 | maxlen = max(maxlen, 22) # avoid truncating hash |
|
540 | 540 | return stringutil.ellipsis(line, maxlen) |
|
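A worked sketch of the clamping above, with a simplified stand-in for ``stringutil.ellipsis`` (an assumption, not the real helper): a configured ``linelen`` below 22 is raised to 22 so the verb and a 12-character hash always survive truncation::

    def ellipsis(text, maxlength):   # simplified stand-in
        if len(text) <= maxlength:
            return text
        return text[: maxlength - 3] + b'...'

    line = b'pick c561b4e977df ' + b'long summary ' * 20
    assert len(ellipsis(line, max(80, 22))) == 80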
541 | 541 | |
|
542 | 542 | def tostate(self): |
|
543 | 543 | """Print an action in format used by histedit state files |
|
544 | 544 | (the first line is the verb, the second line is the node hash) |
|
545 | 545 | """ |
|
546 | 546 | return b"%s\n%s" % (self.verb, node.hex(self.node)) |
|
547 | 547 | |
|
548 | 548 | def run(self): |
|
549 | 549 | """Runs the action. The default behavior is simply to apply the action's |
|
550 | 550 | rulectx onto the current parentctx.""" |
|
551 | 551 | self.applychange() |
|
552 | 552 | self.continuedirty() |
|
553 | 553 | return self.continueclean() |
|
554 | 554 | |
|
555 | 555 | def applychange(self): |
|
556 | 556 | """Applies the changes from this action's rulectx onto the current |
|
557 | 557 | parentctx, but does not commit them.""" |
|
558 | 558 | repo = self.repo |
|
559 | 559 | rulectx = repo[self.node] |
|
560 | 560 | repo.ui.pushbuffer(error=True, labeled=True) |
|
561 | 561 | hg.update(repo, self.state.parentctxnode, quietempty=True) |
|
562 | 562 | repo.ui.popbuffer() |
|
563 | 563 | stats = applychanges(repo.ui, repo, rulectx, {}) |
|
564 | 564 | repo.dirstate.setbranch(rulectx.branch()) |
|
565 | 565 | if stats.unresolvedcount: |
|
566 | 566 | raise error.InterventionRequired( |
|
567 | 567 | _(b'Fix up the change (%s %s)') |
|
568 | 568 | % (self.verb, node.short(self.node)), |
|
569 | 569 | hint=_(b'hg histedit --continue to resume'), |
|
570 | 570 | ) |
|
571 | 571 | |
|
572 | 572 | def continuedirty(self): |
|
573 | 573 | """Continues the action when changes have been applied to the working |
|
574 | 574 | copy. The default behavior is to commit the dirty changes.""" |
|
575 | 575 | repo = self.repo |
|
576 | 576 | rulectx = repo[self.node] |
|
577 | 577 | |
|
578 | 578 | editor = self.commiteditor() |
|
579 | 579 | commit = commitfuncfor(repo, rulectx) |
|
580 | 580 | if repo.ui.configbool(b'rewrite', b'update-timestamp'): |
|
581 | 581 | date = dateutil.makedate() |
|
582 | 582 | else: |
|
583 | 583 | date = rulectx.date() |
|
584 | 584 | commit( |
|
585 | 585 | text=rulectx.description(), |
|
586 | 586 | user=rulectx.user(), |
|
587 | 587 | date=date, |
|
588 | 588 | extra=rulectx.extra(), |
|
589 | 589 | editor=editor, |
|
590 | 590 | ) |
|
591 | 591 | |
|
592 | 592 | def commiteditor(self): |
|
593 | 593 | """The editor to be used to edit the commit message.""" |
|
594 | 594 | return False |
|
595 | 595 | |
|
596 | 596 | def continueclean(self): |
|
597 | 597 | """Continues the action when the working copy is clean. The default |
|
598 | 598 | behavior is to accept the current commit as the new version of the |
|
599 | 599 | rulectx.""" |
|
600 | 600 | ctx = self.repo[b'.'] |
|
601 | 601 | if ctx.node() == self.state.parentctxnode: |
|
602 | 602 | self.repo.ui.warn( |
|
603 | 603 | _(b'%s: skipping changeset (no changes)\n') |
|
604 | 604 | % node.short(self.node) |
|
605 | 605 | ) |
|
606 | 606 | return ctx, [(self.node, tuple())] |
|
607 | 607 | if ctx.node() == self.node: |
|
608 | 608 | # Nothing changed |
|
609 | 609 | return ctx, [] |
|
610 | 610 | return ctx, [(self.node, (ctx.node(),))] |
|
611 | 611 | |
|
612 | 612 | |
|
613 | 613 | def commitfuncfor(repo, src): |
|
614 | 614 | """Build a commit function for the replacement of <src> |
|
615 | 615 | |
|
616 | 616 | This function ensures we apply the same treatment to all changesets. |
|
617 | 617 | |
|
618 | 618 | - Add a 'histedit_source' entry in extra. |
|
619 | 619 | |
|
620 | 620 | Note that fold has its own separate logic because its handling is a bit |
|
621 | 621 | different and not easily factored out of the fold method. |
|
622 | 622 | """ |
|
623 | 623 | phasemin = src.phase() |
|
624 | 624 | |
|
625 | 625 | def commitfunc(**kwargs): |
|
626 | 626 | overrides = {(b'phases', b'new-commit'): phasemin} |
|
627 | 627 | with repo.ui.configoverride(overrides, b'histedit'): |
|
628 | 628 | extra = kwargs.get('extra', {}).copy() |
|
629 | 629 | extra[b'histedit_source'] = src.hex() |
|
630 | 630 | kwargs['extra'] = extra |
|
631 | 631 | return repo.commit(**kwargs) |
|
632 | 632 | |
|
633 | 633 | return commitfunc |
|
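A standalone sketch of the closure pattern above: every commit routed through the returned function carries a ``histedit_source`` entry naming the changeset it replaces (``commit`` and the hash are stand-ins)::

    def make_commitfunc(commit, src_hex):
        def commitfunc(**kwargs):
            extra = dict(kwargs.get('extra') or {})
            extra[b'histedit_source'] = src_hex   # record provenance
            kwargs['extra'] = extra
            return commit(**kwargs)
        return commitfunc

    record = make_commitfunc(lambda **kw: kw, b'c561b4e977df')
    assert record(text=b'msg')['extra'][b'histedit_source'] == b'c561b4e977df'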
634 | 634 | |
|
635 | 635 | |
|
636 | 636 | def applychanges(ui, repo, ctx, opts): |
|
637 | 637 | """Merge changeset from ctx (only) in the current working directory""" |
|
638 | 638 | if ctx.p1().node() == repo.dirstate.p1(): |
|
639 | 639 | # edits are "in place": we do not need to do any merge, |
|
640 | 640 | # just apply the changes onto the parent for editing |
|
641 | 641 | ui.pushbuffer() |
|
642 | 642 | cmdutil.revert(ui, repo, ctx, all=True) |
|
643 | 643 | stats = mergemod.updateresult(0, 0, 0, 0) |
|
644 | 644 | ui.popbuffer() |
|
645 | 645 | else: |
|
646 | 646 | try: |
|
647 | 647 | # ui.forcemerge is an internal variable, do not document |
|
648 | 648 | repo.ui.setconfig( |
|
649 | 649 | b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit' |
|
650 | 650 | ) |
|
651 | 651 | stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit']) |
|
652 | 652 | finally: |
|
653 | 653 | repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit') |
|
654 | 654 | return stats |
|
655 | 655 | |
|
656 | 656 | |
|
657 | 657 | def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False): |
|
658 | 658 | """collapse the set of revisions from first to last as a new one. |
|
659 | 659 | |
|
660 | 660 | Expected commit options are: |
|
661 | 661 | - message |
|
662 | 662 | - date |
|
663 | 663 | - username |
|
664 | 664 | Commit message is edited in all cases. |
|
665 | 665 | |
|
666 | 666 | This function works in memory.""" |
|
667 | 667 | ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev())) |
|
668 | 668 | if not ctxs: |
|
669 | 669 | return None |
|
670 | 670 | for c in ctxs: |
|
671 | 671 | if not c.mutable(): |
|
672 | 672 | raise error.ParseError( |
|
673 | 673 | _(b"cannot fold into public change %s") % node.short(c.node()) |
|
674 | 674 | ) |
|
675 | 675 | base = firstctx.p1() |
|
676 | 676 | |
|
677 | 677 | # commit a new version of the old changeset, including the update |
|
678 | 678 | # collect all files which might be affected |
|
679 | 679 | files = set() |
|
680 | 680 | for ctx in ctxs: |
|
681 | 681 | files.update(ctx.files()) |
|
682 | 682 | |
|
683 | 683 | # Recompute copies (avoid recording a -> b -> a) |
|
684 | 684 | copied = copies.pathcopies(base, lastctx) |
|
685 | 685 | |
|
686 | 686 | # prune files which were reverted by the updates |
|
687 | 687 | files = [f for f in files if not cmdutil.samefile(f, lastctx, base)] |
|
688 | 688 | # commit version of these files as defined by head |
|
689 | 689 | headmf = lastctx.manifest() |
|
690 | 690 | |
|
691 | 691 | def filectxfn(repo, ctx, path): |
|
692 | 692 | if path in headmf: |
|
693 | 693 | fctx = lastctx[path] |
|
694 | 694 | flags = fctx.flags() |
|
695 | 695 | mctx = context.memfilectx( |
|
696 | 696 | repo, |
|
697 | 697 | ctx, |
|
698 | 698 | fctx.path(), |
|
699 | 699 | fctx.data(), |
|
700 | 700 | islink=b'l' in flags, |
|
701 | 701 | isexec=b'x' in flags, |
|
702 | 702 | copysource=copied.get(path), |
|
703 | 703 | ) |
|
704 | 704 | return mctx |
|
705 | 705 | return None |
|
706 | 706 | |
|
707 | 707 | if commitopts.get(b'message'): |
|
708 | 708 | message = commitopts[b'message'] |
|
709 | 709 | else: |
|
710 | 710 | message = firstctx.description() |
|
711 | 711 | user = commitopts.get(b'user') |
|
712 | 712 | date = commitopts.get(b'date') |
|
713 | 713 | extra = commitopts.get(b'extra') |
|
714 | 714 | |
|
715 | 715 | parents = (firstctx.p1().node(), firstctx.p2().node()) |
|
716 | 716 | editor = None |
|
717 | 717 | if not skipprompt: |
|
718 | 718 | editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold') |
|
719 | 719 | new = context.memctx( |
|
720 | 720 | repo, |
|
721 | 721 | parents=parents, |
|
722 | 722 | text=message, |
|
723 | 723 | files=files, |
|
724 | 724 | filectxfn=filectxfn, |
|
725 | 725 | user=user, |
|
726 | 726 | date=date, |
|
727 | 727 | extra=extra, |
|
728 | 728 | editor=editor, |
|
729 | 729 | ) |
|
730 | 730 | return repo.commitctx(new) |
|
731 | 731 | |
|
732 | 732 | |
|
733 | 733 | def _isdirtywc(repo): |
|
734 | 734 | return repo[None].dirty(missing=True) |
|
735 | 735 | |
|
736 | 736 | |
|
737 | 737 | def abortdirty(): |
|
738 | 738 | raise error.Abort( |
|
739 | 739 | _(b'working copy has pending changes'), |
|
740 | 740 | hint=_( |
|
741 | 741 | b'amend, commit, or revert them and run histedit ' |
|
742 | 742 | b'--continue, or abort with histedit --abort' |
|
743 | 743 | ), |
|
744 | 744 | ) |
|
745 | 745 | |
|
746 | 746 | |
|
747 | 747 | def action(verbs, message, priority=False, internal=False): |
|
748 | 748 | def wrap(cls): |
|
749 | 749 | assert not priority or not internal |
|
750 | 750 | verb = verbs[0] |
|
751 | 751 | if priority: |
|
752 | 752 | primaryactions.add(verb) |
|
753 | 753 | elif internal: |
|
754 | 754 | internalactions.add(verb) |
|
755 | 755 | elif len(verbs) > 1: |
|
756 | 756 | secondaryactions.add(verb) |
|
757 | 757 | else: |
|
758 | 758 | tertiaryactions.add(verb) |
|
759 | 759 | |
|
760 | 760 | cls.verb = verb |
|
761 | 761 | cls.verbs = verbs |
|
762 | 762 | cls.message = message |
|
763 | 763 | for verb in verbs: |
|
764 | 764 | actiontable[verb] = cls |
|
765 | 765 | return cls |
|
766 | 766 | |
|
767 | 767 | return wrap |
|
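To illustrate the registration flow, a hypothetical extra action (not part of histedit) run through the decorator above: the first verb becomes the canonical name, and every spelling lands in ``actiontable``::

    @action([b'noop', b'n'], b'use commit unchanged (hypothetical example)')
    class noopaction(histeditaction):
        def run(self):
            return self.repo[self.node], []

    assert actiontable[b'noop'] is actiontable[b'n'] is noopaction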
768 | 768 | |
|
769 | 769 | |
|
770 | 770 | @action([b'pick', b'p'], _(b'use commit'), priority=True) |
|
771 | 771 | class pick(histeditaction): |
|
772 | 772 | def run(self): |
|
773 | 773 | rulectx = self.repo[self.node] |
|
774 | 774 | if rulectx.p1().node() == self.state.parentctxnode: |
|
775 | 775 | self.repo.ui.debug(b'node %s unchanged\n' % node.short(self.node)) |
|
776 | 776 | return rulectx, [] |
|
777 | 777 | |
|
778 | 778 | return super(pick, self).run() |
|
779 | 779 | |
|
780 | 780 | |
|
781 | 781 | @action([b'edit', b'e'], _(b'use commit, but stop for amending'), priority=True) |
|
782 | 782 | class edit(histeditaction): |
|
783 | 783 | def run(self): |
|
784 | 784 | repo = self.repo |
|
785 | 785 | rulectx = repo[self.node] |
|
786 | 786 | hg.update(repo, self.state.parentctxnode, quietempty=True) |
|
787 | 787 | applychanges(repo.ui, repo, rulectx, {}) |
|
788 | 788 | raise error.InterventionRequired( |
|
789 | 789 | _(b'Editing (%s), you may commit or record as needed now.') |
|
790 | 790 | % node.short(self.node), |
|
791 | 791 | hint=_(b'hg histedit --continue to resume'), |
|
792 | 792 | ) |
|
793 | 793 | |
|
794 | 794 | def commiteditor(self): |
|
795 | 795 | return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit') |
|
796 | 796 | |
|
797 | 797 | |
|
798 | 798 | @action([b'fold', b'f'], _(b'use commit, but combine it with the one above')) |
|
799 | 799 | class fold(histeditaction): |
|
800 | 800 | def verify(self, prev, expected, seen): |
|
801 | 801 | """ Verifies semantic correctness of the fold rule""" |
|
802 | 802 | super(fold, self).verify(prev, expected, seen) |
|
803 | 803 | repo = self.repo |
|
804 | 804 | if not prev: |
|
805 | 805 | c = repo[self.node].p1() |
|
806 | 806 | elif prev.verb not in (b'pick', b'base'): |
|
807 | 807 | return |
|
808 | 808 | else: |
|
809 | 809 | c = repo[prev.node] |
|
810 | 810 | if not c.mutable(): |
|
811 | 811 | raise error.ParseError( |
|
812 | 812 | _(b"cannot fold into public change %s") % node.short(c.node()) |
|
813 | 813 | ) |
|
814 | 814 | |
|
815 | 815 | def continuedirty(self): |
|
816 | 816 | repo = self.repo |
|
817 | 817 | rulectx = repo[self.node] |
|
818 | 818 | |
|
819 | 819 | commit = commitfuncfor(repo, rulectx) |
|
820 | 820 | commit( |
|
821 | 821 | text=b'fold-temp-revision %s' % node.short(self.node), |
|
822 | 822 | user=rulectx.user(), |
|
823 | 823 | date=rulectx.date(), |
|
824 | 824 | extra=rulectx.extra(), |
|
825 | 825 | ) |
|
826 | 826 | |
|
827 | 827 | def continueclean(self): |
|
828 | 828 | repo = self.repo |
|
829 | 829 | ctx = repo[b'.'] |
|
830 | 830 | rulectx = repo[self.node] |
|
831 | 831 | parentctxnode = self.state.parentctxnode |
|
832 | 832 | if ctx.node() == parentctxnode: |
|
833 | 833 | repo.ui.warn(_(b'%s: empty changeset\n') % node.short(self.node)) |
|
834 | 834 | return ctx, [(self.node, (parentctxnode,))] |
|
835 | 835 | |
|
836 | 836 | parentctx = repo[parentctxnode] |
|
837 | 837 | newcommits = { |
|
838 | 838 | c.node() |
|
839 | 839 | for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev()) |
|
840 | 840 | } |
|
841 | 841 | if not newcommits: |
|
842 | 842 | repo.ui.warn( |
|
843 | 843 | _( |
|
844 | 844 | b'%s: cannot fold - working copy is not a ' |
|
845 | 845 | b'descendant of previous commit %s\n' |
|
846 | 846 | ) |
|
847 | 847 | % (node.short(self.node), node.short(parentctxnode)) |
|
848 | 848 | ) |
|
849 | 849 | return ctx, [(self.node, (ctx.node(),))] |
|
850 | 850 | |
|
851 | 851 | middlecommits = newcommits.copy() |
|
852 | 852 | middlecommits.discard(ctx.node()) |
|
853 | 853 | |
|
854 | 854 | return self.finishfold( |
|
855 | 855 | repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits |
|
856 | 856 | ) |
|
857 | 857 | |
|
858 | 858 | def skipprompt(self): |
|
859 | 859 | """Returns true if the rule should skip the message editor. |
|
860 | 860 | |
|
861 | 861 | For example, 'fold' wants to show an editor, but 'rollup' |
|
862 | 862 | doesn't want to. |
|
863 | 863 | """ |
|
864 | 864 | return False |
|
865 | 865 | |
|
866 | 866 | def mergedescs(self): |
|
867 | 867 | """Returns true if the rule should merge messages of multiple changes. |
|
868 | 868 | |
|
869 | 869 | This exists mainly so that 'rollup' rules can be a subclass of |
|
870 | 870 | 'fold'. |
|
871 | 871 | """ |
|
872 | 872 | return True |
|
873 | 873 | |
|
874 | 874 | def firstdate(self): |
|
875 | 875 | """Returns true if the rule should preserve the date of the first |
|
876 | 876 | change. |
|
877 | 877 | |
|
878 | 878 | This exists mainly so that 'rollup' rules can be a subclass of |
|
879 | 879 | 'fold'. |
|
880 | 880 | """ |
|
881 | 881 | return False |
|
882 | 882 | |
|
883 | 883 | def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges): |
|
884 |  | parent = ctx.p1().node() |
|
885 |  | hg.updaterepo(repo, parent, overwrite=False) |
|
 | 884 | mergemod.update(ctx.p1()) |
|
886 | 885 | ### prepare new commit data |
|
887 | 886 | commitopts = {} |
|
888 | 887 | commitopts[b'user'] = ctx.user() |
|
889 | 888 | # commit message |
|
890 | 889 | if not self.mergedescs(): |
|
891 | 890 | newmessage = ctx.description() |
|
892 | 891 | else: |
|
893 | 892 | newmessage = ( |
|
894 | 893 | b'\n***\n'.join( |
|
895 | 894 | [ctx.description()] |
|
896 | 895 | + [repo[r].description() for r in internalchanges] |
|
897 | 896 | + [oldctx.description()] |
|
898 | 897 | ) |
|
899 | 898 | + b'\n' |
|
900 | 899 | ) |
|
901 | 900 | commitopts[b'message'] = newmessage |
|
902 | 901 | # date |
|
903 | 902 | if self.firstdate(): |
|
904 | 903 | commitopts[b'date'] = ctx.date() |
|
905 | 904 | else: |
|
906 | 905 | commitopts[b'date'] = max(ctx.date(), oldctx.date()) |
|
907 | 906 | # if date is to be updated to current |
|
908 | 907 | if ui.configbool(b'rewrite', b'update-timestamp'): |
|
909 | 908 | commitopts[b'date'] = dateutil.makedate() |
|
910 | 909 | |
|
911 | 910 | extra = ctx.extra().copy() |
|
912 | 911 | # histedit_source |
|
913 | 912 | # note: ctx is likely a temporary commit, but that's the best we can do |
|
914 | 913 | # here. This is sufficient to solve issue3681 anyway. |
|
915 | 914 | extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex()) |
|
916 | 915 | commitopts[b'extra'] = extra |
|
917 | 916 | phasemin = max(ctx.phase(), oldctx.phase()) |
|
918 | 917 | overrides = {(b'phases', b'new-commit'): phasemin} |
|
919 | 918 | with repo.ui.configoverride(overrides, b'histedit'): |
|
920 | 919 | n = collapse( |
|
921 | 920 | repo, |
|
922 | 921 | ctx, |
|
923 | 922 | repo[newnode], |
|
924 | 923 | commitopts, |
|
925 | 924 | skipprompt=self.skipprompt(), |
|
926 | 925 | ) |
|
927 | 926 | if n is None: |
|
928 | 927 | return ctx, [] |
|
929 |  | hg.updaterepo(repo, n, overwrite=False) |
|
 | 928 | mergemod.update(repo[n]) |
|
930 | 929 | replacements = [ |
|
931 | 930 | (oldctx.node(), (newnode,)), |
|
932 | 931 | (ctx.node(), (n,)), |
|
933 | 932 | (newnode, (n,)), |
|
934 | 933 | ] |
|
935 | 934 | for ich in internalchanges: |
|
936 | 935 | replacements.append((ich, (n,))) |
|
937 | 936 | return repo[n], replacements |
|
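A standalone sketch of the message assembly above: the folded descriptions are joined with the ``***`` separator the user later sees in the editor (the descriptions are hypothetical)::

    descriptions = [b'Add beta', b'Add delta']
    newmessage = b'\n***\n'.join(descriptions) + b'\n'
    assert newmessage == b'Add beta\n***\nAdd delta\n'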
938 | 937 | |
|
939 | 938 | |
|
940 | 939 | @action( |
|
941 | 940 | [b'base', b'b'], |
|
942 | 941 | _(b'checkout changeset and apply further changesets from there'), |
|
943 | 942 | ) |
|
944 | 943 | class base(histeditaction): |
|
945 | 944 | def run(self): |
|
946 | 945 | if self.repo[b'.'].node() != self.node: |
|
947 | 946 | mergemod.clean_update(self.repo[self.node]) |
|
948 | 947 | return self.continueclean() |
|
949 | 948 | |
|
950 | 949 | def continuedirty(self): |
|
951 | 950 | abortdirty() |
|
952 | 951 | |
|
953 | 952 | def continueclean(self): |
|
954 | 953 | basectx = self.repo[b'.'] |
|
955 | 954 | return basectx, [] |
|
956 | 955 | |
|
957 | 956 | def _verifynodeconstraints(self, prev, expected, seen): |
|
958 | 957 | # base can only be used with a node not in the edited set |
|
959 | 958 | if self.node in expected: |
|
960 | 959 | msg = _(b'%s "%s" changeset was an edited list candidate') |
|
961 | 960 | raise error.ParseError( |
|
962 | 961 | msg % (self.verb, node.short(self.node)), |
|
963 | 962 | hint=_(b'base must only use unlisted changesets'), |
|
964 | 963 | ) |
|
965 | 964 | |
|
966 | 965 | |
|
967 | 966 | @action( |
|
968 | 967 | [b'_multifold'], |
|
969 | 968 | _( |
|
970 | 969 | """fold subclass used for when multiple folds happen in a row |
|
971 | 970 | |
|
972 | 971 | We only want to fire the editor for the folded message once when |
|
973 | 972 | (say) four changes are folded down into a single change. This is |
|
974 | 973 | similar to rollup, but we should preserve both messages so that |
|
975 | 974 | when the last fold operation runs we can show the user all the |
|
976 | 975 | commit messages in their editor. |
|
977 | 976 | """ |
|
978 | 977 | ), |
|
979 | 978 | internal=True, |
|
980 | 979 | ) |
|
981 | 980 | class _multifold(fold): |
|
982 | 981 | def skipprompt(self): |
|
983 | 982 | return True |
|
984 | 983 | |
|
985 | 984 | |
|
986 | 985 | @action( |
|
987 | 986 | [b"roll", b"r"], |
|
988 | 987 | _(b"like fold, but discard this commit's description and date"), |
|
989 | 988 | ) |
|
990 | 989 | class rollup(fold): |
|
991 | 990 | def mergedescs(self): |
|
992 | 991 | return False |
|
993 | 992 | |
|
994 | 993 | def skipprompt(self): |
|
995 | 994 | return True |
|
996 | 995 | |
|
997 | 996 | def firstdate(self): |
|
998 | 997 | return True |
|
999 | 998 | |
|
1000 | 999 | |
|
1001 | 1000 | @action([b"drop", b"d"], _(b'remove commit from history')) |
|
1002 | 1001 | class drop(histeditaction): |
|
1003 | 1002 | def run(self): |
|
1004 | 1003 | parentctx = self.repo[self.state.parentctxnode] |
|
1005 | 1004 | return parentctx, [(self.node, tuple())] |
|
1006 | 1005 | |
|
1007 | 1006 | |
|
1008 | 1007 | @action( |
|
1009 | 1008 | [b"mess", b"m"], |
|
1010 | 1009 | _(b'edit commit message without changing commit content'), |
|
1011 | 1010 | priority=True, |
|
1012 | 1011 | ) |
|
1013 | 1012 | class message(histeditaction): |
|
1014 | 1013 | def commiteditor(self): |
|
1015 | 1014 | return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess') |
|
1016 | 1015 | |
|
1017 | 1016 | |
|
1018 | 1017 | def findoutgoing(ui, repo, remote=None, force=False, opts=None): |
|
1019 | 1018 | """utility function to find the first outgoing changeset |
|
1020 | 1019 | |
|
1021 | 1020 | Used by initialization code""" |
|
1022 | 1021 | if opts is None: |
|
1023 | 1022 | opts = {} |
|
1024 | 1023 | dest = ui.expandpath(remote or b'default-push', remote or b'default') |
|
1025 | 1024 | dest, branches = hg.parseurl(dest, None)[:2] |
|
1026 | 1025 | ui.status(_(b'comparing with %s\n') % util.hidepassword(dest)) |
|
1027 | 1026 | |
|
1028 | 1027 | revs, checkout = hg.addbranchrevs(repo, repo, branches, None) |
|
1029 | 1028 | other = hg.peer(repo, opts, dest) |
|
1030 | 1029 | |
|
1031 | 1030 | if revs: |
|
1032 | 1031 | revs = [repo.lookup(rev) for rev in revs] |
|
1033 | 1032 | |
|
1034 | 1033 | outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force) |
|
1035 | 1034 | if not outgoing.missing: |
|
1036 | 1035 | raise error.Abort(_(b'no outgoing ancestors')) |
|
1037 | 1036 | roots = list(repo.revs(b"roots(%ln)", outgoing.missing)) |
|
1038 | 1037 | if len(roots) > 1: |
|
1039 | 1038 | msg = _(b'there are ambiguous outgoing revisions') |
|
1040 | 1039 | hint = _(b"see 'hg help histedit' for more detail") |
|
1041 | 1040 | raise error.Abort(msg, hint=hint) |
|
1042 | 1041 | return repo[roots[0]].node() |
|
1043 | 1042 | |
|
1044 | 1043 | |
|
1045 | 1044 | # Curses Support |
|
1046 | 1045 | try: |
|
1047 | 1046 | import curses |
|
1048 | 1047 | except ImportError: |
|
1049 | 1048 | curses = None |
|
1050 | 1049 | |
|
1051 | 1050 | KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll'] |
|
1052 | 1051 | ACTION_LABELS = { |
|
1053 | 1052 | b'fold': b'^fold', |
|
1054 | 1053 | b'roll': b'^roll', |
|
1055 | 1054 | } |
|
1056 | 1055 | |
|
1057 | 1056 | COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5 |
|
1058 | 1057 | COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8 |
|
1059 | 1058 | COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11 |
|
1060 | 1059 | |
|
1061 | 1060 | E_QUIT, E_HISTEDIT = 1, 2 |
|
1062 | 1061 | E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7 |
|
1063 | 1062 | MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3 |
|
1064 | 1063 | |
|
1065 | 1064 | KEYTABLE = { |
|
1066 | 1065 | b'global': { |
|
1067 | 1066 | b'h': b'next-action', |
|
1068 | 1067 | b'KEY_RIGHT': b'next-action', |
|
1069 | 1068 | b'l': b'prev-action', |
|
1070 | 1069 | b'KEY_LEFT': b'prev-action', |
|
1071 | 1070 | b'q': b'quit', |
|
1072 | 1071 | b'c': b'histedit', |
|
1073 | 1072 | b'C': b'histedit', |
|
1074 | 1073 | b'v': b'showpatch', |
|
1075 | 1074 | b'?': b'help', |
|
1076 | 1075 | }, |
|
1077 | 1076 | MODE_RULES: { |
|
1078 | 1077 | b'd': b'action-drop', |
|
1079 | 1078 | b'e': b'action-edit', |
|
1080 | 1079 | b'f': b'action-fold', |
|
1081 | 1080 | b'm': b'action-mess', |
|
1082 | 1081 | b'p': b'action-pick', |
|
1083 | 1082 | b'r': b'action-roll', |
|
1084 | 1083 | b' ': b'select', |
|
1085 | 1084 | b'j': b'down', |
|
1086 | 1085 | b'k': b'up', |
|
1087 | 1086 | b'KEY_DOWN': b'down', |
|
1088 | 1087 | b'KEY_UP': b'up', |
|
1089 | 1088 | b'J': b'move-down', |
|
1090 | 1089 | b'K': b'move-up', |
|
1091 | 1090 | b'KEY_NPAGE': b'move-down', |
|
1092 | 1091 | b'KEY_PPAGE': b'move-up', |
|
1093 | 1092 | b'0': b'goto', # Used for 0..9 |
|
1094 | 1093 | }, |
|
1095 | 1094 | MODE_PATCH: { |
|
1096 | 1095 | b' ': b'page-down', |
|
1097 | 1096 | b'KEY_NPAGE': b'page-down', |
|
1098 | 1097 | b'KEY_PPAGE': b'page-up', |
|
1099 | 1098 | b'j': b'line-down', |
|
1100 | 1099 | b'k': b'line-up', |
|
1101 | 1100 | b'KEY_DOWN': b'line-down', |
|
1102 | 1101 | b'KEY_UP': b'line-up', |
|
1103 | 1102 | b'J': b'down', |
|
1104 | 1103 | b'K': b'up', |
|
1105 | 1104 | }, |
|
1106 | 1105 | MODE_HELP: {}, |
|
1107 | 1106 | } |
|
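A standalone sketch of the two-level dispatch that ``event`` (below) performs on this table: the mode-specific binding wins, the global table is the fallback, and the digits 0-9 collapse onto the ``b'0'`` (goto) entry::

    def lookup(keytable, mode, ch):
        lookup_ch = b'0' if ch is not None and b'0' <= ch <= b'9' else ch
        return keytable[mode].get(lookup_ch, keytable[b'global'].get(lookup_ch))

    assert lookup(KEYTABLE, MODE_RULES, b'7') == b'goto'
    assert lookup(KEYTABLE, MODE_PATCH, b'q') == b'quit'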
1108 | 1107 | |
|
1109 | 1108 | |
|
1110 | 1109 | def screen_size(): |
|
1111 | 1110 | return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b' ')) |
|
1112 | 1111 | |
|
1113 | 1112 | |
|
1114 | 1113 | class histeditrule(object): |
|
1115 | 1114 | def __init__(self, ui, ctx, pos, action=b'pick'): |
|
1116 | 1115 | self.ui = ui |
|
1117 | 1116 | self.ctx = ctx |
|
1118 | 1117 | self.action = action |
|
1119 | 1118 | self.origpos = pos |
|
1120 | 1119 | self.pos = pos |
|
1121 | 1120 | self.conflicts = [] |
|
1122 | 1121 | |
|
1123 | 1122 | def __bytes__(self): |
|
1124 | 1123 | # Example display of several histeditrules: |
|
1125 | 1124 | # |
|
1126 | 1125 | # #10 pick 316392:06a16c25c053 add option to skip tests |
|
1127 | 1126 | # #11 ^roll 316393:71313c964cc5 <RED>oops a fixup commit</RED> |
|
1128 | 1127 | # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h |
|
1129 | 1128 | # #13 ^fold 316395:14ce5803f4c3 fix warnings |
|
1130 | 1129 | # |
|
1131 | 1130 | # The carets point to the changeset being folded into ("roll this |
|
1132 | 1131 | # changeset into the changeset above"). |
|
1133 | 1132 | return b'%s%s' % (self.prefix, self.desc) |
|
1134 | 1133 | |
|
1135 | 1134 | __str__ = encoding.strmethod(__bytes__) |
|
1136 | 1135 | |
|
1137 | 1136 | @property |
|
1138 | 1137 | def prefix(self): |
|
1139 | 1138 | # Some actions ('fold' and 'roll') combine a patch with a |
|
1140 | 1139 | # previous one. Add a marker showing which patch they apply |
|
1141 | 1140 | # to. |
|
1142 | 1141 | action = ACTION_LABELS.get(self.action, self.action) |
|
1143 | 1142 | |
|
1144 | 1143 | h = self.ctx.hex()[0:12] |
|
1145 | 1144 | r = self.ctx.rev() |
|
1146 | 1145 | |
|
1147 | 1146 | return b"#%s %s %d:%s " % ( |
|
1148 | 1147 | (b'%d' % self.origpos).ljust(2), |
|
1149 | 1148 | action.ljust(6), |
|
1150 | 1149 | r, |
|
1151 | 1150 | h, |
|
1152 | 1151 | ) |
|
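A worked sketch of the prefix format above, with hypothetical position, rev, and hash; ``roll`` and ``fold`` pick up their caret from ``ACTION_LABELS``::

    action = ACTION_LABELS.get(b'roll', b'roll')   # -> b'^roll'
    prefix = b"#%s %s %d:%s " % (
        (b'%d' % 10).ljust(2), action.ljust(6), 316393, b'71313c964cc5')
    assert prefix == b'#10 ^roll  316393:71313c964cc5 '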
1153 | 1152 | |
|
1154 | 1153 | @util.propertycache |
|
1155 | 1154 | def desc(self): |
|
1156 | 1155 | summary = ( |
|
1157 | 1156 | cmdutil.rendertemplate( |
|
1158 | 1157 | self.ctx, self.ui.config(b'histedit', b'summary-template') |
|
1159 | 1158 | ) |
|
1160 | 1159 | or b'' |
|
1161 | 1160 | ) |
|
1162 | 1161 | if summary: |
|
1163 | 1162 | return summary |
|
1164 | 1163 | # This is split off from the prefix property so that we can |
|
1165 | 1164 | # separately make the description for 'roll' red (since it |
|
1166 | 1165 | # will get discarded). |
|
1167 | 1166 | return self.ctx.description().splitlines()[0].strip() |
|
1168 | 1167 | |
|
1169 | 1168 | def checkconflicts(self, other): |
|
1170 | 1169 | if other.pos > self.pos and other.origpos <= self.origpos: |
|
1171 | 1170 | if set(other.ctx.files()) & set(self.ctx.files()) != set(): |
|
1172 | 1171 | self.conflicts.append(other) |
|
1173 | 1172 | return self.conflicts |
|
1174 | 1173 | |
|
1175 | 1174 | if other in self.conflicts: |
|
1176 | 1175 | self.conflicts.remove(other) |
|
1177 | 1176 | return self.conflicts |
|
1178 | 1177 | |
|
1179 | 1178 | |
|
1180 | 1179 | # ============ EVENTS =============== |
|
1181 | 1180 | def movecursor(state, oldpos, newpos): |
|
1182 | 1181 | '''Change the rule/changeset that the cursor is pointing to, regardless of |
|
1183 | 1182 | current mode (you can switch between patches from the view patch window).''' |
|
1184 | 1183 | state[b'pos'] = newpos |
|
1185 | 1184 | |
|
1186 | 1185 | mode, _ = state[b'mode'] |
|
1187 | 1186 | if mode == MODE_RULES: |
|
1188 | 1187 | # Scroll through the list by updating the view for MODE_RULES, so that |
|
1189 | 1188 | # even if we are not currently viewing the rules, switching back will |
|
1190 | 1189 | # result in the cursor's rule being visible. |
|
1191 | 1190 | modestate = state[b'modes'][MODE_RULES] |
|
1192 | 1191 | if newpos < modestate[b'line_offset']: |
|
1193 | 1192 | modestate[b'line_offset'] = newpos |
|
1194 | 1193 | elif newpos > modestate[b'line_offset'] + state[b'page_height'] - 1: |
|
1195 | 1194 | modestate[b'line_offset'] = newpos - state[b'page_height'] + 1 |
|
1196 | 1195 | |
|
1197 | 1196 | # Reset the patch view region to the top of the new patch. |
|
1198 | 1197 | state[b'modes'][MODE_PATCH][b'line_offset'] = 0 |
|
1199 | 1198 | |
|
1200 | 1199 | |
|
1201 | 1200 | def changemode(state, mode): |
|
1202 | 1201 | curmode, _ = state[b'mode'] |
|
1203 | 1202 | state[b'mode'] = (mode, curmode) |
|
1204 | 1203 | if mode == MODE_PATCH: |
|
1205 | 1204 | state[b'modes'][MODE_PATCH][b'patchcontents'] = patchcontents(state) |
|
1206 | 1205 | |
|
1207 | 1206 | |
|
1208 | 1207 | def makeselection(state, pos): |
|
1209 | 1208 | state[b'selected'] = pos |
|
1210 | 1209 | |
|
1211 | 1210 | |
|
1212 | 1211 | def swap(state, oldpos, newpos): |
|
1213 | 1212 | """Swap two positions and calculate necessary conflicts in |
|
1214 | 1213 | O(|newpos-oldpos|) time""" |
|
1215 | 1214 | |
|
1216 | 1215 | rules = state[b'rules'] |
|
1217 | 1216 | assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules) |
|
1218 | 1217 | |
|
1219 | 1218 | rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos] |
|
1220 | 1219 | |
|
1221 | 1220 | # TODO: swap should not know about histeditrule's internals |
|
1222 | 1221 | rules[newpos].pos = newpos |
|
1223 | 1222 | rules[oldpos].pos = oldpos |
|
1224 | 1223 | |
|
1225 | 1224 | start = min(oldpos, newpos) |
|
1226 | 1225 | end = max(oldpos, newpos) |
|
1227 | 1226 | for r in pycompat.xrange(start, end + 1): |
|
1228 | 1227 | rules[newpos].checkconflicts(rules[r]) |
|
1229 | 1228 | rules[oldpos].checkconflicts(rules[r]) |
|
1230 | 1229 | |
|
1231 | 1230 | if state[b'selected']: |
|
1232 | 1231 | makeselection(state, newpos) |
|
1233 | 1232 | |
|
1234 | 1233 | |
|
1235 | 1234 | def changeaction(state, pos, action): |
|
1236 | 1235 | """Change the action state on the given position to the new action""" |
|
1237 | 1236 | rules = state[b'rules'] |
|
1238 | 1237 | assert 0 <= pos < len(rules) |
|
1239 | 1238 | rules[pos].action = action |
|
1240 | 1239 | |
|
1241 | 1240 | |
|
1242 | 1241 | def cycleaction(state, pos, next=False): |
|
1243 | 1242 | """Changes the action state to the next or the previous action from |
|
1244 | 1243 | the action list""" |
|
1245 | 1244 | rules = state[b'rules'] |
|
1246 | 1245 | assert 0 <= pos < len(rules) |
|
1247 | 1246 | current = rules[pos].action |
|
1248 | 1247 | |
|
1249 | 1248 | assert current in KEY_LIST |
|
1250 | 1249 | |
|
1251 | 1250 | index = KEY_LIST.index(current) |
|
1252 | 1251 | if next: |
|
1253 | 1252 | index += 1 |
|
1254 | 1253 | else: |
|
1255 | 1254 | index -= 1 |
|
1256 | 1255 | changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)]) |
|
1257 | 1256 | |
|
1258 | 1257 | |
|
1259 | 1258 | def changeview(state, delta, unit): |
|
1260 | 1259 | '''Change the region of whatever is being viewed (a patch or the list of |
|
1261 | 1260 | changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.''' |
|
1262 | 1261 | mode, _ = state[b'mode'] |
|
1263 | 1262 | if mode != MODE_PATCH: |
|
1264 | 1263 | return |
|
1265 | 1264 | mode_state = state[b'modes'][mode] |
|
1266 | 1265 | num_lines = len(mode_state[b'patchcontents']) |
|
1267 | 1266 | page_height = state[b'page_height'] |
|
1268 | 1267 | unit = page_height if unit == b'page' else 1 |
|
1269 | 1268 | num_pages = 1 + (num_lines - 1) // page_height |
|
1270 | 1269 | max_offset = (num_pages - 1) * page_height |
|
1271 | 1270 | newline = mode_state[b'line_offset'] + delta * unit |
|
1272 | 1271 | mode_state[b'line_offset'] = max(0, min(max_offset, newline)) |
|
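A worked example of the clamping arithmetic above: 25 patch lines on a 10-line page give three pages, so the offset can scroll to at most 20, and a page-down past the end is clamped::

    num_lines, page_height = 25, 10
    num_pages = 1 + (num_lines - 1) // page_height   # -> 3
    max_offset = (num_pages - 1) * page_height       # -> 20
    assert max(0, min(max_offset, 20 + page_height)) == 20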
1273 | 1272 | |
|
1274 | 1273 | |
|
1275 | 1274 | def event(state, ch): |
|
1276 | 1275 | """Change state based on the current character input |
|
1277 | 1276 | |
|
1278 | 1277 | Given the current state and the character typed by the user, update |
|
1279 | 1278 | the state and return the resulting event, if any. |
|
1280 | 1279 | """ |
|
1281 | 1280 | selected = state[b'selected'] |
|
1282 | 1281 | oldpos = state[b'pos'] |
|
1283 | 1282 | rules = state[b'rules'] |
|
1284 | 1283 | |
|
1285 | 1284 | if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"): |
|
1286 | 1285 | return E_RESIZE |
|
1287 | 1286 | |
|
1288 | 1287 | lookup_ch = ch |
|
1289 | 1288 | if ch is not None and b'0' <= ch <= b'9': |
|
1290 | 1289 | lookup_ch = b'0' |
|
1291 | 1290 | |
|
1292 | 1291 | curmode, prevmode = state[b'mode'] |
|
1293 | 1292 | action = KEYTABLE[curmode].get( |
|
1294 | 1293 | lookup_ch, KEYTABLE[b'global'].get(lookup_ch) |
|
1295 | 1294 | ) |
|
1296 | 1295 | if action is None: |
|
1297 | 1296 | return |
|
1298 | 1297 | if action in (b'down', b'move-down'): |
|
1299 | 1298 | newpos = min(oldpos + 1, len(rules) - 1) |
|
1300 | 1299 | movecursor(state, oldpos, newpos) |
|
1301 | 1300 | if selected is not None or action == b'move-down': |
|
1302 | 1301 | swap(state, oldpos, newpos) |
|
1303 | 1302 | elif action in (b'up', b'move-up'): |
|
1304 | 1303 | newpos = max(0, oldpos - 1) |
|
1305 | 1304 | movecursor(state, oldpos, newpos) |
|
1306 | 1305 | if selected is not None or action == b'move-up': |
|
1307 | 1306 | swap(state, oldpos, newpos) |
|
1308 | 1307 | elif action == b'next-action': |
|
1309 | 1308 | cycleaction(state, oldpos, next=True) |
|
1310 | 1309 | elif action == b'prev-action': |
|
1311 | 1310 | cycleaction(state, oldpos, next=False) |
|
1312 | 1311 | elif action == b'select': |
|
1313 | 1312 | selected = oldpos if selected is None else None |
|
1314 | 1313 | makeselection(state, selected) |
|
1315 | 1314 | elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10: |
|
1316 | 1315 | newrule = next((r for r in rules if r.origpos == int(ch))) |
|
1317 | 1316 | movecursor(state, oldpos, newrule.pos) |
|
1318 | 1317 | if selected is not None: |
|
1319 | 1318 | swap(state, oldpos, newrule.pos) |
|
1320 | 1319 | elif action.startswith(b'action-'): |
|
1321 | 1320 | changeaction(state, oldpos, action[7:]) |
|
1322 | 1321 | elif action == b'showpatch': |
|
1323 | 1322 | changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode) |
|
1324 | 1323 | elif action == b'help': |
|
1325 | 1324 | changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode) |
|
1326 | 1325 | elif action == b'quit': |
|
1327 | 1326 | return E_QUIT |
|
1328 | 1327 | elif action == b'histedit': |
|
1329 | 1328 | return E_HISTEDIT |
|
1330 | 1329 | elif action == b'page-down': |
|
1331 | 1330 | return E_PAGEDOWN |
|
1332 | 1331 | elif action == b'page-up': |
|
1333 | 1332 | return E_PAGEUP |
|
1334 | 1333 | elif action == b'line-down': |
|
1335 | 1334 | return E_LINEDOWN |
|
1336 | 1335 | elif action == b'line-up': |
|
1337 | 1336 | return E_LINEUP |
|
1338 | 1337 | |
|
1339 | 1338 | |
|
1340 | 1339 | def makecommands(rules): |
|
1341 | 1340 | """Returns a list of commands consumable by histedit --commands based on |
|
1342 | 1341 | our list of rules""" |
|
1343 | 1342 | commands = [] |
|
1344 | 1343 | for rules in rules: |
|
1345 | 1344 | commands.append(b'%s %s\n' % (rules.action, rules.ctx)) |
|
1346 | 1345 | return commands |
|
1347 | 1346 | |
|
1348 | 1347 | |
|
1349 | 1348 | def addln(win, y, x, line, color=None): |
|
1350 | 1349 | """Add a line to the given window left padding but 100% filled with |
|
1351 | 1350 | whitespace characters, so that the color appears on the whole line""" |
|
1352 | 1351 | maxy, maxx = win.getmaxyx() |
|
1353 | 1352 | length = maxx - 1 - x |
|
1354 | 1353 | line = bytes(line).ljust(length)[:length] |
|
1355 | 1354 | if y < 0: |
|
1356 | 1355 | y = maxy + y |
|
1357 | 1356 | if x < 0: |
|
1358 | 1357 | x = maxx + x |
|
1359 | 1358 | if color: |
|
1360 | 1359 | win.addstr(y, x, line, color) |
|
1361 | 1360 | else: |
|
1362 | 1361 | win.addstr(y, x, line) |
|
1363 | 1362 | |
|
1364 | 1363 | |
|
1365 | 1364 | def _trunc_head(line, n): |
|
1366 | 1365 | if len(line) <= n: |
|
1367 | 1366 | return line |
|
1368 | 1367 | return b'> ' + line[-(n - 2) :] |
|
1369 | 1368 | |
|
1370 | 1369 | |
|
1371 | 1370 | def _trunc_tail(line, n): |
|
1372 | 1371 | if len(line) <= n: |
|
1373 | 1372 | return line |
|
1374 | 1373 | return line[: n - 2] + b' >' |
|
1375 | 1374 | |
|
1376 | 1375 | |
|
1377 | 1376 | def patchcontents(state): |
|
1378 | 1377 | repo = state[b'repo'] |
|
1379 | 1378 | rule = state[b'rules'][state[b'pos']] |
|
1380 | 1379 | displayer = logcmdutil.changesetdisplayer( |
|
1381 | 1380 | repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True |
|
1382 | 1381 | ) |
|
1383 | 1382 | overrides = {(b'ui', b'verbose'): True} |
|
1384 | 1383 | with repo.ui.configoverride(overrides, source=b'histedit'): |
|
1385 | 1384 | displayer.show(rule.ctx) |
|
1386 | 1385 | displayer.close() |
|
1387 | 1386 | return displayer.hunk[rule.ctx.rev()].splitlines() |
|
1388 | 1387 | |
|
1389 | 1388 | |
|
1390 | 1389 | def _chisteditmain(repo, rules, stdscr): |
|
1391 | 1390 | try: |
|
1392 | 1391 | curses.use_default_colors() |
|
1393 | 1392 | except curses.error: |
|
1394 | 1393 | pass |
|
1395 | 1394 | |
|
1396 | 1395 | # initialize color pattern |
|
1397 | 1396 | curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE) |
|
1398 | 1397 | curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE) |
|
1399 | 1398 | curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW) |
|
1400 | 1399 | curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN) |
|
1401 | 1400 | curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA) |
|
1402 | 1401 | curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1) |
|
1403 | 1402 | curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1) |
|
1404 | 1403 | curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1) |
|
1405 | 1404 | curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1) |
|
1406 | 1405 | curses.init_pair( |
|
1407 | 1406 | COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA |
|
1408 | 1407 | ) |
|
1409 | 1408 | curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE) |
|
1410 | 1409 | |
|
1411 | 1410 | # don't display the cursor |
|
1412 | 1411 | try: |
|
1413 | 1412 | curses.curs_set(0) |
|
1414 | 1413 | except curses.error: |
|
1415 | 1414 | pass |
|
1416 | 1415 | |
|
1417 | 1416 | def rendercommit(win, state): |
|
1418 | 1417 | """Renders the commit window that shows the log of the current selected |
|
1419 | 1418 | commit""" |
|
1420 | 1419 | pos = state[b'pos'] |
|
1421 | 1420 | rules = state[b'rules'] |
|
1422 | 1421 | rule = rules[pos] |
|
1423 | 1422 | |
|
1424 | 1423 | ctx = rule.ctx |
|
1425 | 1424 | win.box() |
|
1426 | 1425 | |
|
1427 | 1426 | maxy, maxx = win.getmaxyx() |
|
1428 | 1427 | length = maxx - 3 |
|
1429 | 1428 | |
|
1430 | 1429 | line = b"changeset: %d:%s" % (ctx.rev(), ctx.hex()[:12]) |
|
1431 | 1430 | win.addstr(1, 1, line[:length]) |
|
1432 | 1431 | |
|
1433 | 1432 | line = b"user: %s" % ctx.user() |
|
1434 | 1433 | win.addstr(2, 1, line[:length]) |
|
1435 | 1434 | |
|
1436 | 1435 | bms = repo.nodebookmarks(ctx.node()) |
|
1437 | 1436 | line = b"bookmark: %s" % b' '.join(bms) |
|
1438 | 1437 | win.addstr(3, 1, line[:length]) |
|
1439 | 1438 | |
|
1440 | 1439 | line = b"summary: %s" % (ctx.description().splitlines()[0]) |
|
1441 | 1440 | win.addstr(4, 1, line[:length]) |
|
1442 | 1441 | |
|
1443 | 1442 | line = b"files: " |
|
1444 | 1443 | win.addstr(5, 1, line) |
|
1445 | 1444 | fnx = 1 + len(line) |
|
1446 | 1445 | fnmaxx = length - fnx + 1 |
|
1447 | 1446 | y = 5 |
|
1448 | 1447 | fnmaxn = maxy - (1 + y) - 1 |
|
1449 | 1448 | files = ctx.files() |
|
1450 | 1449 | for i, line1 in enumerate(files): |
|
1451 | 1450 | if len(files) > fnmaxn and i == fnmaxn - 1: |
|
1452 | 1451 | win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx)) |
|
1453 | 1452 | y = y + 1 |
|
1454 | 1453 | break |
|
1455 | 1454 | win.addstr(y, fnx, _trunc_head(line1, fnmaxx)) |
|
1456 | 1455 | y = y + 1 |
|
1457 | 1456 | |
|
1458 | 1457 | conflicts = rule.conflicts |
|
1459 | 1458 | if len(conflicts) > 0: |
|
1460 | 1459 | conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts)) |
|
1461 | 1460 | conflictstr = b"changed files overlap with %s" % conflictstr |
|
1462 | 1461 | else: |
|
1463 | 1462 | conflictstr = b'no overlap' |
|
1464 | 1463 | |
|
1465 | 1464 | win.addstr(y, 1, conflictstr[:length]) |
|
1466 | 1465 | win.noutrefresh() |
|
1467 | 1466 | |
|
1468 | 1467 | def helplines(mode): |
|
1469 | 1468 | if mode == MODE_PATCH: |
|
1470 | 1469 | help = b"""\ |
|
1471 | 1470 | ?: help, k/up: line up, j/down: line down, v: stop viewing patch |
|
1472 | 1471 | pgup: prev page, space/pgdn: next page, c: commit, q: abort |
|
1473 | 1472 | """ |
|
1474 | 1473 | else: |
|
1475 | 1474 | help = b"""\ |
|
1476 | 1475 | ?: help, k/up: move up, j/down: move down, space: select, v: view patch |
|
1477 | 1476 | d: drop, e: edit, f: fold, m: mess, p: pick, r: roll |
|
1478 | 1477 | pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort |
|
1479 | 1478 | """ |
|
1480 | 1479 | return help.splitlines() |
|
1481 | 1480 | |
|
1482 | 1481 | def renderhelp(win, state): |
|
1483 | 1482 | maxy, maxx = win.getmaxyx() |
|
1484 | 1483 | mode, _ = state[b'mode'] |
|
1485 | 1484 | for y, line in enumerate(helplines(mode)): |
|
1486 | 1485 | if y >= maxy: |
|
1487 | 1486 | break |
|
1488 | 1487 | addln(win, y, 0, line, curses.color_pair(COLOR_HELP)) |
|
1489 | 1488 | win.noutrefresh() |
|
1490 | 1489 | |
|
1491 | 1490 | def renderrules(rulesscr, state): |
|
1492 | 1491 | rules = state[b'rules'] |
|
1493 | 1492 | pos = state[b'pos'] |
|
1494 | 1493 | selected = state[b'selected'] |
|
1495 | 1494 | start = state[b'modes'][MODE_RULES][b'line_offset'] |
|
1496 | 1495 | |
|
1497 | 1496 | conflicts = [r.ctx for r in rules if r.conflicts] |
|
1498 | 1497 | if len(conflicts) > 0: |
|
1499 | 1498 | line = b"potential conflict in %s" % b','.join( |
|
1500 | 1499 | map(pycompat.bytestr, conflicts) |
|
1501 | 1500 | ) |
|
1502 | 1501 | addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN)) |
|
1503 | 1502 | |
|
1504 | 1503 | for y, rule in enumerate(rules[start:]): |
|
1505 | 1504 | if y >= state[b'page_height']: |
|
1506 | 1505 | break |
|
1507 | 1506 | if len(rule.conflicts) > 0: |
|
1508 | 1507 | rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN)) |
|
1509 | 1508 | else: |
|
1510 | 1509 | rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK) |
|
1511 | 1510 | |
|
1512 | 1511 | if y + start == selected: |
|
1513 | 1512 | rollcolor = COLOR_ROLL_SELECTED |
|
1514 | 1513 | addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED)) |
|
1515 | 1514 | elif y + start == pos: |
|
1516 | 1515 | rollcolor = COLOR_ROLL_CURRENT |
|
1517 | 1516 | addln( |
|
1518 | 1517 | rulesscr, |
|
1519 | 1518 | y, |
|
1520 | 1519 | 2, |
|
1521 | 1520 | rule, |
|
1522 | 1521 | curses.color_pair(COLOR_CURRENT) | curses.A_BOLD, |
|
1523 | 1522 | ) |
|
1524 | 1523 | else: |
|
1525 | 1524 | rollcolor = COLOR_ROLL |
|
1526 | 1525 | addln(rulesscr, y, 2, rule) |
|
1527 | 1526 | |
|
1528 | 1527 | if rule.action == b'roll': |
|
1529 | 1528 | rulesscr.addstr( |
|
1530 | 1529 | y, |
|
1531 | 1530 | 2 + len(rule.prefix), |
|
1532 | 1531 | rule.desc, |
|
1533 | 1532 | curses.color_pair(rollcolor), |
|
1534 | 1533 | ) |
|
1535 | 1534 | |
|
1536 | 1535 | rulesscr.noutrefresh() |
|
1537 | 1536 | |
|
1538 | 1537 | def renderstring(win, state, output, diffcolors=False): |
|
1539 | 1538 | maxy, maxx = win.getmaxyx() |
|
1540 | 1539 | length = min(maxy - 1, len(output)) |
|
1541 | 1540 | for y in range(0, length): |
|
1542 | 1541 | line = output[y] |
|
1543 | 1542 | if diffcolors: |
|
1544 | 1543 | if line and line[0] == b'+': |
|
1545 | 1544 | win.addstr( |
|
1546 | 1545 | y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE) |
|
1547 | 1546 | ) |
|
1548 | 1547 | elif line and line[0] == b'-': |
|
1549 | 1548 | win.addstr( |
|
1550 | 1549 | y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE) |
|
1551 | 1550 | ) |
|
1552 | 1551 | elif line.startswith(b'@@ '): |
|
1553 | 1552 | win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET)) |
|
1554 | 1553 | else: |
|
1555 | 1554 | win.addstr(y, 0, line) |
|
1556 | 1555 | else: |
|
1557 | 1556 | win.addstr(y, 0, line) |
|
1558 | 1557 | win.noutrefresh() |
|
1559 | 1558 | |
|
1560 | 1559 | def renderpatch(win, state): |
|
1561 | 1560 | start = state[b'modes'][MODE_PATCH][b'line_offset'] |
|
1562 | 1561 | content = state[b'modes'][MODE_PATCH][b'patchcontents'] |
|
1563 | 1562 | renderstring(win, state, content[start:], diffcolors=True) |
|
1564 | 1563 | |
|
1565 | 1564 | def layout(mode): |
|
1566 | 1565 | maxy, maxx = stdscr.getmaxyx() |
|
1567 | 1566 | helplen = len(helplines(mode)) |
|
1568 | 1567 | return { |
|
1569 | 1568 | b'commit': (12, maxx), |
|
1570 | 1569 | b'help': (helplen, maxx), |
|
1571 | 1570 | b'main': (maxy - helplen - 12, maxx), |
|
1572 | 1571 | } |
|
1573 | 1572 | |
|
1574 | 1573 | def drawvertwin(size, y, x): |
|
1575 | 1574 | win = curses.newwin(size[0], size[1], y, x) |
|
1576 | 1575 | y += size[0] |
|
1577 | 1576 | return win, y, x |
|
1578 | 1577 | |
|
1579 | 1578 | state = { |
|
1580 | 1579 | b'pos': 0, |
|
1581 | 1580 | b'rules': rules, |
|
1582 | 1581 | b'selected': None, |
|
1583 | 1582 | b'mode': (MODE_INIT, MODE_INIT), |
|
1584 | 1583 | b'page_height': None, |
|
1585 | 1584 | b'modes': { |
|
1586 | 1585 | MODE_RULES: {b'line_offset': 0,}, |
|
1587 | 1586 | MODE_PATCH: {b'line_offset': 0,}, |
|
1588 | 1587 | }, |
|
1589 | 1588 | b'repo': repo, |
|
1590 | 1589 | } |
|
1591 | 1590 | |
|
1592 | 1591 | # eventloop |
|
1593 | 1592 | ch = None |
|
1594 | 1593 | stdscr.clear() |
|
1595 | 1594 | stdscr.refresh() |
|
1596 | 1595 | while True: |
|
1597 | 1596 | try: |
|
1598 | 1597 | oldmode, _ = state[b'mode'] |
|
1599 | 1598 | if oldmode == MODE_INIT: |
|
1600 | 1599 | changemode(state, MODE_RULES) |
|
1601 | 1600 | e = event(state, ch) |
|
1602 | 1601 | |
|
1603 | 1602 | if e == E_QUIT: |
|
1604 | 1603 | return False |
|
1605 | 1604 | if e == E_HISTEDIT: |
|
1606 | 1605 | return state[b'rules'] |
|
1607 | 1606 | else: |
|
1608 | 1607 | if e == E_RESIZE: |
|
1609 | 1608 | size = screen_size() |
|
1610 | 1609 | if size != stdscr.getmaxyx(): |
|
1611 | 1610 | curses.resizeterm(*size) |
|
1612 | 1611 | |
|
1613 | 1612 | curmode, _ = state[b'mode'] |
|
1614 | 1613 | sizes = layout(curmode) |
|
1615 | 1614 | if curmode != oldmode: |
|
1616 | 1615 | state[b'page_height'] = sizes[b'main'][0] |
|
1617 | 1616 | # Adjust the view to fit the current screen size. |
|
1618 | 1617 | movecursor(state, state[b'pos'], state[b'pos']) |
|
1619 | 1618 | |
|
1620 | 1619 | # Pack the windows against the top, each pane spread across the |
|
1621 | 1620 | # full width of the screen. |
|
1622 | 1621 | y, x = (0, 0) |
|
1623 | 1622 | helpwin, y, x = drawvertwin(sizes[b'help'], y, x) |
|
1624 | 1623 | mainwin, y, x = drawvertwin(sizes[b'main'], y, x) |
|
1625 | 1624 | commitwin, y, x = drawvertwin(sizes[b'commit'], y, x) |
|
1626 | 1625 | |
|
1627 | 1626 | if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP): |
|
1628 | 1627 | if e == E_PAGEDOWN: |
|
1629 | 1628 | changeview(state, +1, b'page') |
|
1630 | 1629 | elif e == E_PAGEUP: |
|
1631 | 1630 | changeview(state, -1, b'page') |
|
1632 | 1631 | elif e == E_LINEDOWN: |
|
1633 | 1632 | changeview(state, +1, b'line') |
|
1634 | 1633 | elif e == E_LINEUP: |
|
1635 | 1634 | changeview(state, -1, b'line') |
|
1636 | 1635 | |
|
1637 | 1636 | # start rendering |
|
1638 | 1637 | commitwin.erase() |
|
1639 | 1638 | helpwin.erase() |
|
1640 | 1639 | mainwin.erase() |
|
1641 | 1640 | if curmode == MODE_PATCH: |
|
1642 | 1641 | renderpatch(mainwin, state) |
|
1643 | 1642 | elif curmode == MODE_HELP: |
|
1644 | 1643 | renderstring(mainwin, state, __doc__.strip().splitlines()) |
|
1645 | 1644 | else: |
|
1646 | 1645 | renderrules(mainwin, state) |
|
1647 | 1646 | rendercommit(commitwin, state) |
|
1648 | 1647 | renderhelp(helpwin, state) |
|
1649 | 1648 | curses.doupdate() |
|
1650 | 1649 | # done rendering |
|
1651 | 1650 | ch = encoding.strtolocal(stdscr.getkey()) |
|
1652 | 1651 | except curses.error: |
|
1653 | 1652 | pass |
|
1654 | 1653 | |
|
1655 | 1654 | |
|
1656 | 1655 | def _chistedit(ui, repo, freeargs, opts): |
|
1657 | 1656 | """interactively edit changeset history via a curses interface |
|
1658 | 1657 | |
|
1659 | 1658 | Provides a ncurses interface to histedit. Press ? in chistedit mode |
|
1660 | 1659 | to see an extensive help. Requires python-curses to be installed.""" |
|
1661 | 1660 | |
|
1662 | 1661 | if curses is None: |
|
1663 | 1662 | raise error.Abort(_(b"Python curses library required")) |
|
1664 | 1663 | |
|
1665 | 1664 | # disable color |
|
1666 | 1665 | ui._colormode = None |
|
1667 | 1666 | |
|
1668 | 1667 | try: |
|
1669 | 1668 | keep = opts.get(b'keep') |
|
1670 | 1669 | revs = opts.get(b'rev', [])[:] |
|
1671 | 1670 | cmdutil.checkunfinished(repo) |
|
1672 | 1671 | cmdutil.bailifchanged(repo) |
|
1673 | 1672 | |
|
1674 | 1673 | if os.path.exists(os.path.join(repo.path, b'histedit-state')): |
|
1675 | 1674 | raise error.Abort( |
|
1676 | 1675 | _( |
|
1677 | 1676 | b'history edit already in progress, try ' |
|
1678 | 1677 | b'--continue or --abort' |
|
1679 | 1678 | ) |
|
1680 | 1679 | ) |
|
1681 | 1680 | revs.extend(freeargs) |
|
1682 | 1681 | if not revs: |
|
1683 | 1682 | defaultrev = destutil.desthistedit(ui, repo) |
|
1684 | 1683 | if defaultrev is not None: |
|
1685 | 1684 | revs.append(defaultrev) |
|
1686 | 1685 | if len(revs) != 1: |
|
1687 | 1686 | raise error.Abort( |
|
1688 | 1687 | _(b'histedit requires exactly one ancestor revision') |
|
1689 | 1688 | ) |
|
1690 | 1689 | |
|
1691 | 1690 | rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs))) |
|
1692 | 1691 | if len(rr) != 1: |
|
1693 | 1692 | raise error.Abort( |
|
1694 | 1693 | _( |
|
1695 | 1694 | b'The specified revisions must have ' |
|
1696 | 1695 | b'exactly one common root' |
|
1697 | 1696 | ) |
|
1698 | 1697 | ) |
|
1699 | 1698 | root = rr[0].node() |
|
1700 | 1699 | |
|
1701 | 1700 | topmost = repo.dirstate.p1() |
|
1702 | 1701 | revs = between(repo, root, topmost, keep) |
|
1703 | 1702 | if not revs: |
|
1704 | 1703 | raise error.Abort( |
|
1705 | 1704 | _(b'%s is not an ancestor of working directory') |
|
1706 | 1705 | % node.short(root) |
|
1707 | 1706 | ) |
|
1708 | 1707 | |
|
1709 | 1708 | ctxs = [] |
|
1710 | 1709 | for i, r in enumerate(revs): |
|
1711 | 1710 | ctxs.append(histeditrule(ui, repo[r], i)) |
|
1712 | 1711 | with util.with_lc_ctype(): |
|
1713 | 1712 | rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs)) |
|
1714 | 1713 | curses.echo() |
|
1715 | 1714 | curses.endwin() |
|
1716 | 1715 | if rc is False: |
|
1717 | 1716 | ui.write(_(b"histedit aborted\n")) |
|
1718 | 1717 | return 0 |
|
1719 | 1718 | if type(rc) is list: |
|
1720 | 1719 | ui.status(_(b"performing changes\n")) |
|
1721 | 1720 | rules = makecommands(rc) |
|
1722 | 1721 | with repo.vfs(b'chistedit', b'w+') as fp: |
|
1723 | 1722 | for r in rules: |
|
1724 | 1723 | fp.write(r) |
|
1725 | 1724 | opts[b'commands'] = fp.name |
|
1726 | 1725 | return _texthistedit(ui, repo, freeargs, opts) |
|
1727 | 1726 | except KeyboardInterrupt: |
|
1728 | 1727 | pass |
|
1729 | 1728 | return -1 |
|
1730 | 1729 | |
|
1731 | 1730 | |
|
1732 | 1731 | @command( |
|
1733 | 1732 | b'histedit', |
|
1734 | 1733 | [ |
|
1735 | 1734 | ( |
|
1736 | 1735 | b'', |
|
1737 | 1736 | b'commands', |
|
1738 | 1737 | b'', |
|
1739 | 1738 | _(b'read history edits from the specified file'), |
|
1740 | 1739 | _(b'FILE'), |
|
1741 | 1740 | ), |
|
1742 | 1741 | (b'c', b'continue', False, _(b'continue an edit already in progress')), |
|
1743 | 1742 | (b'', b'edit-plan', False, _(b'edit remaining actions list')), |
|
1744 | 1743 | ( |
|
1745 | 1744 | b'k', |
|
1746 | 1745 | b'keep', |
|
1747 | 1746 | False, |
|
1748 | 1747 | _(b"don't strip old nodes after edit is complete"), |
|
1749 | 1748 | ), |
|
1750 | 1749 | (b'', b'abort', False, _(b'abort an edit in progress')), |
|
1751 | 1750 | (b'o', b'outgoing', False, _(b'changesets not found in destination')), |
|
1752 | 1751 | ( |
|
1753 | 1752 | b'f', |
|
1754 | 1753 | b'force', |
|
1755 | 1754 | False, |
|
1756 | 1755 | _(b'force outgoing even for unrelated repositories'), |
|
1757 | 1756 | ), |
|
1758 | 1757 | (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')), |
|
1759 | 1758 | ] |
|
1760 | 1759 | + cmdutil.formatteropts, |
|
1761 | 1760 | _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"), |
|
1762 | 1761 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, |
|
1763 | 1762 | ) |
|
1764 | 1763 | def histedit(ui, repo, *freeargs, **opts): |
|
1765 | 1764 | """interactively edit changeset history |
|
1766 | 1765 | |
|
1767 | 1766 | This command lets you edit a linear series of changesets (up to |
|
1768 | 1767 | and including the working directory, which should be clean). |
|
1769 | 1768 | You can: |
|
1770 | 1769 | |
|
1771 | 1770 | - `pick` to [re]order a changeset |
|
1772 | 1771 | |
|
1773 | 1772 | - `drop` to omit changeset |
|
1774 | 1773 | |
|
1775 | 1774 | - `mess` to reword the changeset commit message |
|
1776 | 1775 | |
|
1777 | 1776 | - `fold` to combine it with the preceding changeset (using the later date) |
|
1778 | 1777 | |
|
1779 | 1778 | - `roll` like fold, but discarding this commit's description and date |
|
1780 | 1779 | |
|
1781 | 1780 | - `edit` to edit this changeset (preserving date) |
|
1782 | 1781 | |
|
1783 | 1782 | - `base` to checkout changeset and apply further changesets from there |
|
1784 | 1783 | |
|
1785 | 1784 | There are a number of ways to select the root changeset: |
|
1786 | 1785 | |
|
1787 | 1786 | - Specify ANCESTOR directly |
|
1788 | 1787 | |
|
1789 | 1788 | - Use --outgoing -- it will be the first linear changeset not |
|
1790 | 1789 | included in destination. (See :hg:`help config.paths.default-push`) |
|
1791 | 1790 | |
|
1792 | 1791 | - Otherwise, the value from the "histedit.defaultrev" config option |
|
1793 | 1792 | is used as a revset to select the base revision when ANCESTOR is not |
|
1794 | 1793 | specified. The first revision returned by the revset is used. By |
|
1795 | 1794 | default, this selects the editable history that is unique to the |
|
1796 | 1795 | ancestry of the working directory. |
|
1797 | 1796 | |
|
1798 | 1797 | .. container:: verbose |
|
1799 | 1798 | |
|
1800 | 1799 | If you use --outgoing, this command will abort if there are ambiguous |
|
1801 | 1800 | outgoing revisions. For example, if there are multiple branches |
|
1802 | 1801 | containing outgoing revisions. |
|
1803 | 1802 | |
|
1804 | 1803 | Use "min(outgoing() and ::.)" or similar revset specification |
|
1805 | 1804 | instead of --outgoing to specify edit target revision exactly in |
|
1806 | 1805 | such ambiguous situation. See :hg:`help revsets` for detail about |
|
1807 | 1806 | selecting revisions. |
|
1808 | 1807 | |
|
1809 | 1808 | .. container:: verbose |
|
1810 | 1809 | |
|
1811 | 1810 | Examples: |
|
1812 | 1811 | |
|
1813 | 1812 | - A number of changes have been made. |
|
1814 | 1813 | Revision 3 is no longer needed. |
|
1815 | 1814 | |
|
1816 | 1815 | Start history editing from revision 3:: |
|
1817 | 1816 | |
|
1818 | 1817 | hg histedit -r 3 |
|
1819 | 1818 | |
|
1820 | 1819 | An editor opens, containing the list of revisions, |
|
1821 | 1820 | with specific actions specified:: |
|
1822 | 1821 | |
|
1823 | 1822 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
1824 | 1823 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
1825 | 1824 | pick 0a9639fcda9d 5 Morgify the cromulancy |
|
1826 | 1825 | |
|
1827 | 1826 | Additional information about the possible actions |
|
1828 | 1827 | to take appears below the list of revisions. |
|
1829 | 1828 | |
|
1830 | 1829 | To remove revision 3 from the history, |
|
1831 | 1830 | its action (at the beginning of the relevant line) |
|
1832 | 1831 | is changed to 'drop':: |
|
1833 | 1832 | |
|
1834 | 1833 | drop 5339bf82f0ca 3 Zworgle the foobar |
|
1835 | 1834 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
1836 | 1835 | pick 0a9639fcda9d 5 Morgify the cromulancy |
|
1837 | 1836 | |
|
1838 | 1837 | - A number of changes have been made. |
|
1839 | 1838 | Revision 2 and 4 need to be swapped. |
|
1840 | 1839 | |
|
1841 | 1840 | Start history editing from revision 2:: |
|
1842 | 1841 | |
|
1843 | 1842 | hg histedit -r 2 |
|
1844 | 1843 | |
|
1845 | 1844 | An editor opens, containing the list of revisions, |
|
1846 | 1845 | with specific actions specified:: |
|
1847 | 1846 | |
|
1848 | 1847 | pick 252a1af424ad 2 Blorb a morgwazzle |
|
1849 | 1848 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
1850 | 1849 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
1851 | 1850 | |
|
1852 | 1851 | To swap revision 2 and 4, its lines are swapped |
|
1853 | 1852 | in the editor:: |
|
1854 | 1853 | |
|
1855 | 1854 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
1856 | 1855 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
1857 | 1856 | pick 252a1af424ad 2 Blorb a morgwazzle |
|
1858 | 1857 | |
|
1859 | 1858 | Returns 0 on success, 1 if user intervention is required (not only |
|
1860 | 1859 | for intentional "edit" command, but also for resolving unexpected |
|
1861 | 1860 | conflicts). |
|
1862 | 1861 | """ |
|
1863 | 1862 | opts = pycompat.byteskwargs(opts) |
|
1864 | 1863 | |
|
1865 | 1864 | # kludge: _chistedit only works for starting an edit, not aborting |
|
1866 | 1865 | # or continuing, so fall back to regular _texthistedit for those |
|
1867 | 1866 | # operations. |
|
1868 | 1867 | if ui.interface(b'histedit') == b'curses' and _getgoal(opts) == goalnew: |
|
1869 | 1868 | return _chistedit(ui, repo, freeargs, opts) |
|
1870 | 1869 | return _texthistedit(ui, repo, freeargs, opts) |
|
1871 | 1870 | |
|
1872 | 1871 | |
|
1873 | 1872 | def _texthistedit(ui, repo, freeargs, opts): |
|
1874 | 1873 | state = histeditstate(repo) |
|
1875 | 1874 | with repo.wlock() as wlock, repo.lock() as lock: |
|
1876 | 1875 | state.wlock = wlock |
|
1877 | 1876 | state.lock = lock |
|
1878 | 1877 | _histedit(ui, repo, state, freeargs, opts) |
|
1879 | 1878 | |
|
1880 | 1879 | |
|
1881 | 1880 | goalcontinue = b'continue' |
|
1882 | 1881 | goalabort = b'abort' |
|
1883 | 1882 | goaleditplan = b'edit-plan' |
|
1884 | 1883 | goalnew = b'new' |
|
1885 | 1884 | |
|
1886 | 1885 | |
|
1887 | 1886 | def _getgoal(opts): |
|
1888 | 1887 | if opts.get(b'continue'): |
|
1889 | 1888 | return goalcontinue |
|
1890 | 1889 | if opts.get(b'abort'): |
|
1891 | 1890 | return goalabort |
|
1892 | 1891 | if opts.get(b'edit_plan'): |
|
1893 | 1892 | return goaleditplan |
|
1894 | 1893 | return goalnew |
|
1895 | 1894 | |
|
1896 | 1895 | |
|
1897 | 1896 | def _readfile(ui, path): |
|
1898 | 1897 | if path == b'-': |
|
1899 | 1898 | with ui.timeblockedsection(b'histedit'): |
|
1900 | 1899 | return ui.fin.read() |
|
1901 | 1900 | else: |
|
1902 | 1901 | with open(path, b'rb') as f: |
|
1903 | 1902 | return f.read() |
|
1904 | 1903 | |
|
1905 | 1904 | |
|
1906 | 1905 | def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs): |
|
1907 | 1906 | # TODO only abort if we try to histedit mq patches, not just |
|
1908 | 1907 | # blanket if mq patches are applied somewhere |
|
1909 | 1908 | mq = getattr(repo, 'mq', None) |
|
1910 | 1909 | if mq and mq.applied: |
|
1911 | 1910 | raise error.Abort(_(b'source has mq patches applied')) |
|
1912 | 1911 | |
|
1913 | 1912 | # basic argument incompatibility processing |
|
1914 | 1913 | outg = opts.get(b'outgoing') |
|
1915 | 1914 | editplan = opts.get(b'edit_plan') |
|
1916 | 1915 | abort = opts.get(b'abort') |
|
1917 | 1916 | force = opts.get(b'force') |
|
1918 | 1917 | if force and not outg: |
|
1919 | 1918 | raise error.Abort(_(b'--force only allowed with --outgoing')) |
|
1920 | 1919 | if goal == b'continue': |
|
1921 | 1920 | if any((outg, abort, revs, freeargs, rules, editplan)): |
|
1922 | 1921 | raise error.Abort(_(b'no arguments allowed with --continue')) |
|
1923 | 1922 | elif goal == b'abort': |
|
1924 | 1923 | if any((outg, revs, freeargs, rules, editplan)): |
|
1925 | 1924 | raise error.Abort(_(b'no arguments allowed with --abort')) |
|
1926 | 1925 | elif goal == b'edit-plan': |
|
1927 | 1926 | if any((outg, revs, freeargs)): |
|
1928 | 1927 | raise error.Abort( |
|
1929 | 1928 | _(b'only --commands argument allowed with --edit-plan') |
|
1930 | 1929 | ) |
|
1931 | 1930 | else: |
|
1932 | 1931 | if state.inprogress(): |
|
1933 | 1932 | raise error.Abort( |
|
1934 | 1933 | _( |
|
1935 | 1934 | b'history edit already in progress, try ' |
|
1936 | 1935 | b'--continue or --abort' |
|
1937 | 1936 | ) |
|
1938 | 1937 | ) |
|
1939 | 1938 | if outg: |
|
1940 | 1939 | if revs: |
|
1941 | 1940 | raise error.Abort(_(b'no revisions allowed with --outgoing')) |
|
1942 | 1941 | if len(freeargs) > 1: |
|
1943 | 1942 | raise error.Abort( |
|
1944 | 1943 | _(b'only one repo argument allowed with --outgoing') |
|
1945 | 1944 | ) |
|
1946 | 1945 | else: |
|
1947 | 1946 | revs.extend(freeargs) |
|
1948 | 1947 | if len(revs) == 0: |
|
1949 | 1948 | defaultrev = destutil.desthistedit(ui, repo) |
|
1950 | 1949 | if defaultrev is not None: |
|
1951 | 1950 | revs.append(defaultrev) |
|
1952 | 1951 | |
|
1953 | 1952 | if len(revs) != 1: |
|
1954 | 1953 | raise error.Abort( |
|
1955 | 1954 | _(b'histedit requires exactly one ancestor revision') |
|
1956 | 1955 | ) |
|
1957 | 1956 | |
|
1958 | 1957 | |
|
1959 | 1958 | def _histedit(ui, repo, state, freeargs, opts): |
|
1960 | 1959 | fm = ui.formatter(b'histedit', opts) |
|
1961 | 1960 | fm.startitem() |
|
1962 | 1961 | goal = _getgoal(opts) |
|
1963 | 1962 | revs = opts.get(b'rev', []) |
|
1964 | 1963 | nobackup = not ui.configbool(b'rewrite', b'backup-bundle') |
|
1965 | 1964 | rules = opts.get(b'commands', b'') |
|
1966 | 1965 | state.keep = opts.get(b'keep', False) |
|
1967 | 1966 | |
|
1968 | 1967 | _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs) |
|
1969 | 1968 | |
|
1970 | 1969 | hastags = False |
|
1971 | 1970 | if revs: |
|
1972 | 1971 | revs = scmutil.revrange(repo, revs) |
|
1973 | 1972 | ctxs = [repo[rev] for rev in revs] |
|
1974 | 1973 | for ctx in ctxs: |
|
1975 | 1974 | tags = [tag for tag in ctx.tags() if tag != b'tip'] |
|
1976 | 1975 | if not hastags: |
|
1977 | 1976 | hastags = len(tags) |
|
1978 | 1977 | if hastags: |
|
1979 | 1978 | if ui.promptchoice( |
|
1980 | 1979 | _( |
|
1981 | 1980 | b'warning: tags associated with the given' |
|
1982 | 1981 | b' changeset will be lost after histedit.\n' |
|
1983 | 1982 | b'do you want to continue (yN)? $$ &Yes $$ &No' |
|
1984 | 1983 | ), |
|
1985 | 1984 | default=1, |
|
1986 | 1985 | ): |
|
1987 | 1986 | raise error.Abort(_(b'histedit cancelled\n')) |
|
1988 | 1987 | # rebuild state |
|
1989 | 1988 | if goal == goalcontinue: |
|
1990 | 1989 | state.read() |
|
1991 | 1990 | state = bootstrapcontinue(ui, state, opts) |
|
1992 | 1991 | elif goal == goaleditplan: |
|
1993 | 1992 | _edithisteditplan(ui, repo, state, rules) |
|
1994 | 1993 | return |
|
1995 | 1994 | elif goal == goalabort: |
|
1996 | 1995 | _aborthistedit(ui, repo, state, nobackup=nobackup) |
|
1997 | 1996 | return |
|
1998 | 1997 | else: |
|
1999 | 1998 | # goal == goalnew |
|
2000 | 1999 | _newhistedit(ui, repo, state, revs, freeargs, opts) |
|
2001 | 2000 | |
|
2002 | 2001 | _continuehistedit(ui, repo, state) |
|
2003 | 2002 | _finishhistedit(ui, repo, state, fm) |
|
2004 | 2003 | fm.end() |
|
2005 | 2004 | |
|
2006 | 2005 | |
|
2007 | 2006 | def _continuehistedit(ui, repo, state): |
|
2008 | 2007 | """This function runs after either: |
|
2009 | 2008 | - bootstrapcontinue (if the goal is 'continue') |
|
2010 | 2009 | - _newhistedit (if the goal is 'new') |
|
2011 | 2010 | """ |
|
2012 | 2011 | # preprocess rules so that we can hide inner folds from the user |
|
2013 | 2012 | # and only show one editor |
|
2014 | 2013 | actions = state.actions[:] |
|
2015 | 2014 | for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])): |
|
2016 | 2015 | if action.verb == b'fold' and nextact and nextact.verb == b'fold': |
|
2017 | 2016 | state.actions[idx].__class__ = _multifold |
|
2018 | 2017 | |
|
2019 | 2018 | # Force an initial state file write, so the user can run --abort/continue |
|
2020 | 2019 | # even if there's an exception before the first transaction serialize. |
|
2021 | 2020 | state.write() |
|
2022 | 2021 | |
|
2023 | 2022 | tr = None |
|
2024 | 2023 | # Don't use singletransaction by default since it rolls the entire |
|
2025 | 2024 | # transaction back if an unexpected exception happens (like a |
|
2026 | 2025 | # pretxncommit hook throws, or the user aborts the commit msg editor). |
|
2027 | 2026 | if ui.configbool(b"histedit", b"singletransaction"): |
|
2028 | 2027 | # Don't use a 'with' for the transaction, since actions may close |
|
2029 | 2028 | # and reopen a transaction. For example, if the action executes an |
|
2030 | 2029 | # external process it may choose to commit the transaction first. |
|
2031 | 2030 | tr = repo.transaction(b'histedit') |
|
2032 | 2031 | progress = ui.makeprogress( |
|
2033 | 2032 | _(b"editing"), unit=_(b'changes'), total=len(state.actions) |
|
2034 | 2033 | ) |
|
2035 | 2034 | with progress, util.acceptintervention(tr): |
|
2036 | 2035 | while state.actions: |
|
2037 | 2036 | state.write(tr=tr) |
|
2038 | 2037 | actobj = state.actions[0] |
|
2039 | 2038 | progress.increment(item=actobj.torule()) |
|
2040 | 2039 | ui.debug( |
|
2041 | 2040 | b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule()) |
|
2042 | 2041 | ) |
|
2043 | 2042 | parentctx, replacement_ = actobj.run() |
|
2044 | 2043 | state.parentctxnode = parentctx.node() |
|
2045 | 2044 | state.replacements.extend(replacement_) |
|
2046 | 2045 | state.actions.pop(0) |
|
2047 | 2046 | |
|
2048 | 2047 | state.write() |
|
2049 | 2048 | |
|
2050 | 2049 | |
|
2051 | 2050 | def _finishhistedit(ui, repo, state, fm): |
|
2052 | 2051 | """This action runs when histedit is finishing its session""" |
|
2053 |
|
|
|
2052 | mergemod.update(repo[state.parentctxnode]) | |
|
2054 | 2053 | |
|
2055 | 2054 | mapping, tmpnodes, created, ntm = processreplacement(state) |
|
2056 | 2055 | if mapping: |
|
2057 | 2056 | for prec, succs in pycompat.iteritems(mapping): |
|
2058 | 2057 | if not succs: |
|
2059 | 2058 | ui.debug(b'histedit: %s is dropped\n' % node.short(prec)) |
|
2060 | 2059 | else: |
|
2061 | 2060 | ui.debug( |
|
2062 | 2061 | b'histedit: %s is replaced by %s\n' |
|
2063 | 2062 | % (node.short(prec), node.short(succs[0])) |
|
2064 | 2063 | ) |
|
2065 | 2064 | if len(succs) > 1: |
|
2066 | 2065 | m = b'histedit: %s' |
|
2067 | 2066 | for n in succs[1:]: |
|
2068 | 2067 | ui.debug(m % node.short(n)) |
|
2069 | 2068 | |
|
2070 | 2069 | if not state.keep: |
|
2071 | 2070 | if mapping: |
|
2072 | 2071 | movetopmostbookmarks(repo, state.topmost, ntm) |
|
2073 | 2072 | # TODO update mq state |
|
2074 | 2073 | else: |
|
2075 | 2074 | mapping = {} |
|
2076 | 2075 | |
|
2077 | 2076 | for n in tmpnodes: |
|
2078 | 2077 | if n in repo: |
|
2079 | 2078 | mapping[n] = () |
|
2080 | 2079 | |
|
2081 | 2080 | # remove entries about unknown nodes |
|
2082 | 2081 | has_node = repo.unfiltered().changelog.index.has_node |
|
2083 | 2082 | mapping = { |
|
2084 | 2083 | k: v |
|
2085 | 2084 | for k, v in mapping.items() |
|
2086 | 2085 | if has_node(k) and all(has_node(n) for n in v) |
|
2087 | 2086 | } |
|
2088 | 2087 | scmutil.cleanupnodes(repo, mapping, b'histedit') |
|
2089 | 2088 | hf = fm.hexfunc |
|
2090 | 2089 | fl = fm.formatlist |
|
2091 | 2090 | fd = fm.formatdict |
|
2092 | 2091 | nodechanges = fd( |
|
2093 | 2092 | { |
|
2094 | 2093 | hf(oldn): fl([hf(n) for n in newn], name=b'node') |
|
2095 | 2094 | for oldn, newn in pycompat.iteritems(mapping) |
|
2096 | 2095 | }, |
|
2097 | 2096 | key=b"oldnode", |
|
2098 | 2097 | value=b"newnodes", |
|
2099 | 2098 | ) |
|
2100 | 2099 | fm.data(nodechanges=nodechanges) |
|
2101 | 2100 | |
|
2102 | 2101 | state.clear() |
|
2103 | 2102 | if os.path.exists(repo.sjoin(b'undo')): |
|
2104 | 2103 | os.unlink(repo.sjoin(b'undo')) |
|
2105 | 2104 | if repo.vfs.exists(b'histedit-last-edit.txt'): |
|
2106 | 2105 | repo.vfs.unlink(b'histedit-last-edit.txt') |
|
2107 | 2106 | |
|
2108 | 2107 | |
|
2109 | 2108 | def _aborthistedit(ui, repo, state, nobackup=False): |
|
2110 | 2109 | try: |
|
2111 | 2110 | state.read() |
|
2112 | 2111 | __, leafs, tmpnodes, __ = processreplacement(state) |
|
2113 | 2112 | ui.debug(b'restore wc to old parent %s\n' % node.short(state.topmost)) |
|
2114 | 2113 | |
|
2115 | 2114 | # Recover our old commits if necessary |
|
2116 | 2115 | if not state.topmost in repo and state.backupfile: |
|
2117 | 2116 | backupfile = repo.vfs.join(state.backupfile) |
|
2118 | 2117 | f = hg.openpath(ui, backupfile) |
|
2119 | 2118 | gen = exchange.readbundle(ui, f, backupfile) |
|
2120 | 2119 | with repo.transaction(b'histedit.abort') as tr: |
|
2121 | 2120 | bundle2.applybundle( |
|
2122 | 2121 | repo, |
|
2123 | 2122 | gen, |
|
2124 | 2123 | tr, |
|
2125 | 2124 | source=b'histedit', |
|
2126 | 2125 | url=b'bundle:' + backupfile, |
|
2127 | 2126 | ) |
|
2128 | 2127 | |
|
2129 | 2128 | os.remove(backupfile) |
|
2130 | 2129 | |
|
2131 | 2130 | # check whether we should update away |
|
2132 | 2131 | if repo.unfiltered().revs( |
|
2133 | 2132 | b'parents() and (%n or %ln::)', |
|
2134 | 2133 | state.parentctxnode, |
|
2135 | 2134 | leafs | tmpnodes, |
|
2136 | 2135 | ): |
|
2137 | 2136 | hg.clean(repo, state.topmost, show_stats=True, quietempty=True) |
|
2138 | 2137 | cleanupnode(ui, repo, tmpnodes, nobackup=nobackup) |
|
2139 | 2138 | cleanupnode(ui, repo, leafs, nobackup=nobackup) |
|
2140 | 2139 | except Exception: |
|
2141 | 2140 | if state.inprogress(): |
|
2142 | 2141 | ui.warn( |
|
2143 | 2142 | _( |
|
2144 | 2143 | b'warning: encountered an exception during histedit ' |
|
2145 | 2144 | b'--abort; the repository may not have been completely ' |
|
2146 | 2145 | b'cleaned up\n' |
|
2147 | 2146 | ) |
|
2148 | 2147 | ) |
|
2149 | 2148 | raise |
|
2150 | 2149 | finally: |
|
2151 | 2150 | state.clear() |
|
2152 | 2151 | |
|
2153 | 2152 | |
|
2154 | 2153 | def hgaborthistedit(ui, repo): |
|
2155 | 2154 | state = histeditstate(repo) |
|
2156 | 2155 | nobackup = not ui.configbool(b'rewrite', b'backup-bundle') |
|
2157 | 2156 | with repo.wlock() as wlock, repo.lock() as lock: |
|
2158 | 2157 | state.wlock = wlock |
|
2159 | 2158 | state.lock = lock |
|
2160 | 2159 | _aborthistedit(ui, repo, state, nobackup=nobackup) |
|
2161 | 2160 | |
|
2162 | 2161 | |
|
2163 | 2162 | def _edithisteditplan(ui, repo, state, rules): |
|
2164 | 2163 | state.read() |
|
2165 | 2164 | if not rules: |
|
2166 | 2165 | comment = geteditcomment( |
|
2167 | 2166 | ui, node.short(state.parentctxnode), node.short(state.topmost) |
|
2168 | 2167 | ) |
|
2169 | 2168 | rules = ruleeditor(repo, ui, state.actions, comment) |
|
2170 | 2169 | else: |
|
2171 | 2170 | rules = _readfile(ui, rules) |
|
2172 | 2171 | actions = parserules(rules, state) |
|
2173 | 2172 | ctxs = [repo[act.node] for act in state.actions if act.node] |
|
2174 | 2173 | warnverifyactions(ui, repo, actions, state, ctxs) |
|
2175 | 2174 | state.actions = actions |
|
2176 | 2175 | state.write() |
|
2177 | 2176 | |
|
2178 | 2177 | |
|
2179 | 2178 | def _newhistedit(ui, repo, state, revs, freeargs, opts): |
|
2180 | 2179 | outg = opts.get(b'outgoing') |
|
2181 | 2180 | rules = opts.get(b'commands', b'') |
|
2182 | 2181 | force = opts.get(b'force') |
|
2183 | 2182 | |
|
2184 | 2183 | cmdutil.checkunfinished(repo) |
|
2185 | 2184 | cmdutil.bailifchanged(repo) |
|
2186 | 2185 | |
|
2187 | 2186 | topmost = repo.dirstate.p1() |
|
2188 | 2187 | if outg: |
|
2189 | 2188 | if freeargs: |
|
2190 | 2189 | remote = freeargs[0] |
|
2191 | 2190 | else: |
|
2192 | 2191 | remote = None |
|
2193 | 2192 | root = findoutgoing(ui, repo, remote, force, opts) |
|
2194 | 2193 | else: |
|
2195 | 2194 | rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs))) |
|
2196 | 2195 | if len(rr) != 1: |
|
2197 | 2196 | raise error.Abort( |
|
2198 | 2197 | _( |
|
2199 | 2198 | b'The specified revisions must have ' |
|
2200 | 2199 | b'exactly one common root' |
|
2201 | 2200 | ) |
|
2202 | 2201 | ) |
|
2203 | 2202 | root = rr[0].node() |
|
2204 | 2203 | |
|
2205 | 2204 | revs = between(repo, root, topmost, state.keep) |
|
2206 | 2205 | if not revs: |
|
2207 | 2206 | raise error.Abort( |
|
2208 | 2207 | _(b'%s is not an ancestor of working directory') % node.short(root) |
|
2209 | 2208 | ) |
|
2210 | 2209 | |
|
2211 | 2210 | ctxs = [repo[r] for r in revs] |
|
2212 | 2211 | |
|
2213 | 2212 | wctx = repo[None] |
|
2214 | 2213 | # Please don't ask me why `ancestors` is this value. I figured it |
|
2215 | 2214 | # out with print-debugging, not by actually understanding what the |
|
2216 | 2215 | # merge code is doing. :( |
|
2217 | 2216 | ancs = [repo[b'.']] |
|
2218 | 2217 | # Sniff-test to make sure we won't collide with untracked files in |
|
2219 | 2218 | # the working directory. If we don't do this, we can get a |
|
2220 | 2219 | # collision after we've started histedit and backing out gets ugly |
|
2221 | 2220 | # for everyone, especially the user. |
|
2222 | 2221 | for c in [ctxs[0].p1()] + ctxs: |
|
2223 | 2222 | try: |
|
2224 | 2223 | mergemod.calculateupdates( |
|
2225 | 2224 | repo, |
|
2226 | 2225 | wctx, |
|
2227 | 2226 | c, |
|
2228 | 2227 | ancs, |
|
2229 | 2228 | # These parameters were determined by print-debugging |
|
2230 | 2229 | # what happens later on inside histedit. |
|
2231 | 2230 | branchmerge=False, |
|
2232 | 2231 | force=False, |
|
2233 | 2232 | acceptremote=False, |
|
2234 | 2233 | followcopies=False, |
|
2235 | 2234 | ) |
|
2236 | 2235 | except error.Abort: |
|
2237 | 2236 | raise error.Abort( |
|
2238 | 2237 | _( |
|
2239 | 2238 | b"untracked files in working directory conflict with files in %s" |
|
2240 | 2239 | ) |
|
2241 | 2240 | % c |
|
2242 | 2241 | ) |
|
2243 | 2242 | |
|
2244 | 2243 | if not rules: |
|
2245 | 2244 | comment = geteditcomment(ui, node.short(root), node.short(topmost)) |
|
2246 | 2245 | actions = [pick(state, r) for r in revs] |
|
2247 | 2246 | rules = ruleeditor(repo, ui, actions, comment) |
|
2248 | 2247 | else: |
|
2249 | 2248 | rules = _readfile(ui, rules) |
|
2250 | 2249 | actions = parserules(rules, state) |
|
2251 | 2250 | warnverifyactions(ui, repo, actions, state, ctxs) |
|
2252 | 2251 | |
|
2253 | 2252 | parentctxnode = repo[root].p1().node() |
|
2254 | 2253 | |
|
2255 | 2254 | state.parentctxnode = parentctxnode |
|
2256 | 2255 | state.actions = actions |
|
2257 | 2256 | state.topmost = topmost |
|
2258 | 2257 | state.replacements = [] |
|
2259 | 2258 | |
|
2260 | 2259 | ui.log( |
|
2261 | 2260 | b"histedit", |
|
2262 | 2261 | b"%d actions to histedit\n", |
|
2263 | 2262 | len(actions), |
|
2264 | 2263 | histedit_num_actions=len(actions), |
|
2265 | 2264 | ) |
|
2266 | 2265 | |
|
2267 | 2266 | # Create a backup so we can always abort completely. |
|
2268 | 2267 | backupfile = None |
|
2269 | 2268 | if not obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
2270 | 2269 | backupfile = repair.backupbundle( |
|
2271 | 2270 | repo, [parentctxnode], [topmost], root, b'histedit' |
|
2272 | 2271 | ) |
|
2273 | 2272 | state.backupfile = backupfile |
|
2274 | 2273 | |
|
2275 | 2274 | |
|
2276 | 2275 | def _getsummary(ctx): |
|
2277 | 2276 | # a common pattern is to extract the summary but default to the empty |
|
2278 | 2277 | # string |
|
2279 | 2278 | summary = ctx.description() or b'' |
|
2280 | 2279 | if summary: |
|
2281 | 2280 | summary = summary.splitlines()[0] |
|
2282 | 2281 | return summary |
|
2283 | 2282 | |
|
2284 | 2283 | |
|
2285 | 2284 | def bootstrapcontinue(ui, state, opts): |
|
2286 | 2285 | repo = state.repo |
|
2287 | 2286 | |
|
2288 | 2287 | ms = mergestatemod.mergestate.read(repo) |
|
2289 | 2288 | mergeutil.checkunresolved(ms) |
|
2290 | 2289 | |
|
2291 | 2290 | if state.actions: |
|
2292 | 2291 | actobj = state.actions.pop(0) |
|
2293 | 2292 | |
|
2294 | 2293 | if _isdirtywc(repo): |
|
2295 | 2294 | actobj.continuedirty() |
|
2296 | 2295 | if _isdirtywc(repo): |
|
2297 | 2296 | abortdirty() |
|
2298 | 2297 | |
|
2299 | 2298 | parentctx, replacements = actobj.continueclean() |
|
2300 | 2299 | |
|
2301 | 2300 | state.parentctxnode = parentctx.node() |
|
2302 | 2301 | state.replacements.extend(replacements) |
|
2303 | 2302 | |
|
2304 | 2303 | return state |
|
2305 | 2304 | |
|
2306 | 2305 | |
|
2307 | 2306 | def between(repo, old, new, keep): |
|
2308 | 2307 | """select and validate the set of revision to edit |
|
2309 | 2308 | |
|
2310 | 2309 | When keep is false, the specified set can't have children.""" |
|
2311 | 2310 | revs = repo.revs(b'%n::%n', old, new) |
|
2312 | 2311 | if revs and not keep: |
|
2313 | 2312 | rewriteutil.precheck(repo, revs, b'edit') |
|
2314 | 2313 | if repo.revs(b'(%ld) and merge()', revs): |
|
2315 | 2314 | raise error.Abort(_(b'cannot edit history that contains merges')) |
|
2316 | 2315 | return pycompat.maplist(repo.changelog.node, revs) |
|
2317 | 2316 | |
|
2318 | 2317 | |
|
2319 | 2318 | def ruleeditor(repo, ui, actions, editcomment=b""): |
|
2320 | 2319 | """open an editor to edit rules |
|
2321 | 2320 | |
|
2322 | 2321 | rules are in the format [ [act, ctx], ...] like in state.rules |
|
2323 | 2322 | """ |
|
2324 | 2323 | if repo.ui.configbool(b"experimental", b"histedit.autoverb"): |
|
2325 | 2324 | newact = util.sortdict() |
|
2326 | 2325 | for act in actions: |
|
2327 | 2326 | ctx = repo[act.node] |
|
2328 | 2327 | summary = _getsummary(ctx) |
|
2329 | 2328 | fword = summary.split(b' ', 1)[0].lower() |
|
2330 | 2329 | added = False |
|
2331 | 2330 | |
|
2332 | 2331 | # if it doesn't end with the special character '!' just skip this |
|
2333 | 2332 | if fword.endswith(b'!'): |
|
2334 | 2333 | fword = fword[:-1] |
|
2335 | 2334 | if fword in primaryactions | secondaryactions | tertiaryactions: |
|
2336 | 2335 | act.verb = fword |
|
2337 | 2336 | # get the target summary |
|
2338 | 2337 | tsum = summary[len(fword) + 1 :].lstrip() |
|
2339 | 2338 | # safe but slow: reverse iterate over the actions so we |
|
2340 | 2339 | # don't clash on two commits having the same summary |
|
2341 | 2340 | for na, l in reversed(list(pycompat.iteritems(newact))): |
|
2342 | 2341 | actx = repo[na.node] |
|
2343 | 2342 | asum = _getsummary(actx) |
|
2344 | 2343 | if asum == tsum: |
|
2345 | 2344 | added = True |
|
2346 | 2345 | l.append(act) |
|
2347 | 2346 | break |
|
2348 | 2347 | |
|
2349 | 2348 | if not added: |
|
2350 | 2349 | newact[act] = [] |
|
2351 | 2350 | |
|
2352 | 2351 | # copy over and flatten the new list |
|
2353 | 2352 | actions = [] |
|
2354 | 2353 | for na, l in pycompat.iteritems(newact): |
|
2355 | 2354 | actions.append(na) |
|
2356 | 2355 | actions += l |
|
2357 | 2356 | |
|
2358 | 2357 | rules = b'\n'.join([act.torule() for act in actions]) |
|
2359 | 2358 | rules += b'\n\n' |
|
2360 | 2359 | rules += editcomment |
|
2361 | 2360 | rules = ui.edit( |
|
2362 | 2361 | rules, |
|
2363 | 2362 | ui.username(), |
|
2364 | 2363 | {b'prefix': b'histedit'}, |
|
2365 | 2364 | repopath=repo.path, |
|
2366 | 2365 | action=b'histedit', |
|
2367 | 2366 | ) |
|
2368 | 2367 | |
|
2369 | 2368 | # Save edit rules in .hg/histedit-last-edit.txt in case |
|
2370 | 2369 | # the user needs to ask for help after something |
|
2371 | 2370 | # surprising happens. |
|
2372 | 2371 | with repo.vfs(b'histedit-last-edit.txt', b'wb') as f: |
|
2373 | 2372 | f.write(rules) |
|
2374 | 2373 | |
|
2375 | 2374 | return rules |
|
2376 | 2375 | |
|
2377 | 2376 | |
|
2378 | 2377 | def parserules(rules, state): |
|
2379 | 2378 | """Read the histedit rules string and return list of action objects """ |
|
2380 | 2379 | rules = [ |
|
2381 | 2380 | l |
|
2382 | 2381 | for l in (r.strip() for r in rules.splitlines()) |
|
2383 | 2382 | if l and not l.startswith(b'#') |
|
2384 | 2383 | ] |
|
2385 | 2384 | actions = [] |
|
2386 | 2385 | for r in rules: |
|
2387 | 2386 | if b' ' not in r: |
|
2388 | 2387 | raise error.ParseError(_(b'malformed line "%s"') % r) |
|
2389 | 2388 | verb, rest = r.split(b' ', 1) |
|
2390 | 2389 | |
|
2391 | 2390 | if verb not in actiontable: |
|
2392 | 2391 | raise error.ParseError(_(b'unknown action "%s"') % verb) |
|
2393 | 2392 | |
|
2394 | 2393 | action = actiontable[verb].fromrule(state, rest) |
|
2395 | 2394 | actions.append(action) |
|
2396 | 2395 | return actions |
|
2397 | 2396 | |
|
2398 | 2397 | |
|
2399 | 2398 | def warnverifyactions(ui, repo, actions, state, ctxs): |
|
2400 | 2399 | try: |
|
2401 | 2400 | verifyactions(actions, state, ctxs) |
|
2402 | 2401 | except error.ParseError: |
|
2403 | 2402 | if repo.vfs.exists(b'histedit-last-edit.txt'): |
|
2404 | 2403 | ui.warn( |
|
2405 | 2404 | _( |
|
2406 | 2405 | b'warning: histedit rules saved ' |
|
2407 | 2406 | b'to: .hg/histedit-last-edit.txt\n' |
|
2408 | 2407 | ) |
|
2409 | 2408 | ) |
|
2410 | 2409 | raise |
|
2411 | 2410 | |
|
2412 | 2411 | |
|
2413 | 2412 | def verifyactions(actions, state, ctxs): |
|
2414 | 2413 | """Verify that there exists exactly one action per given changeset and |
|
2415 | 2414 | other constraints. |
|
2416 | 2415 | |
|
2417 | 2416 | Will abort if there are to many or too few rules, a malformed rule, |
|
2418 | 2417 | or a rule on a changeset outside of the user-given range. |
|
2419 | 2418 | """ |
|
2420 | 2419 | expected = {c.node() for c in ctxs} |
|
2421 | 2420 | seen = set() |
|
2422 | 2421 | prev = None |
|
2423 | 2422 | |
|
2424 | 2423 | if actions and actions[0].verb in [b'roll', b'fold']: |
|
2425 | 2424 | raise error.ParseError( |
|
2426 | 2425 | _(b'first changeset cannot use verb "%s"') % actions[0].verb |
|
2427 | 2426 | ) |
|
2428 | 2427 | |
|
2429 | 2428 | for action in actions: |
|
2430 | 2429 | action.verify(prev, expected, seen) |
|
2431 | 2430 | prev = action |
|
2432 | 2431 | if action.node is not None: |
|
2433 | 2432 | seen.add(action.node) |
|
2434 | 2433 | missing = sorted(expected - seen) # sort to stabilize output |
|
2435 | 2434 | |
|
2436 | 2435 | if state.repo.ui.configbool(b'histedit', b'dropmissing'): |
|
2437 | 2436 | if len(actions) == 0: |
|
2438 | 2437 | raise error.ParseError( |
|
2439 | 2438 | _(b'no rules provided'), |
|
2440 | 2439 | hint=_(b'use strip extension to remove commits'), |
|
2441 | 2440 | ) |
|
2442 | 2441 | |
|
2443 | 2442 | drops = [drop(state, n) for n in missing] |
|
2444 | 2443 | # put the in the beginning so they execute immediately and |
|
2445 | 2444 | # don't show in the edit-plan in the future |
|
2446 | 2445 | actions[:0] = drops |
|
2447 | 2446 | elif missing: |
|
2448 | 2447 | raise error.ParseError( |
|
2449 | 2448 | _(b'missing rules for changeset %s') % node.short(missing[0]), |
|
2450 | 2449 | hint=_( |
|
2451 | 2450 | b'use "drop %s" to discard, see also: ' |
|
2452 | 2451 | b"'hg help -e histedit.config'" |
|
2453 | 2452 | ) |
|
2454 | 2453 | % node.short(missing[0]), |
|
2455 | 2454 | ) |
|
2456 | 2455 | |
|
2457 | 2456 | |
|
2458 | 2457 | def adjustreplacementsfrommarkers(repo, oldreplacements): |
|
2459 | 2458 | """Adjust replacements from obsolescence markers |
|
2460 | 2459 | |
|
2461 | 2460 | Replacements structure is originally generated based on |
|
2462 | 2461 | histedit's state and does not account for changes that are |
|
2463 | 2462 | not recorded there. This function fixes that by adding |
|
2464 | 2463 | data read from obsolescence markers""" |
|
2465 | 2464 | if not obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
2466 | 2465 | return oldreplacements |
|
2467 | 2466 | |
|
2468 | 2467 | unfi = repo.unfiltered() |
|
2469 | 2468 | get_rev = unfi.changelog.index.get_rev |
|
2470 | 2469 | obsstore = repo.obsstore |
|
2471 | 2470 | newreplacements = list(oldreplacements) |
|
2472 | 2471 | oldsuccs = [r[1] for r in oldreplacements] |
|
2473 | 2472 | # successors that have already been added to succstocheck once |
|
2474 | 2473 | seensuccs = set().union( |
|
2475 | 2474 | *oldsuccs |
|
2476 | 2475 | ) # create a set from an iterable of tuples |
|
2477 | 2476 | succstocheck = list(seensuccs) |
|
2478 | 2477 | while succstocheck: |
|
2479 | 2478 | n = succstocheck.pop() |
|
2480 | 2479 | missing = get_rev(n) is None |
|
2481 | 2480 | markers = obsstore.successors.get(n, ()) |
|
2482 | 2481 | if missing and not markers: |
|
2483 | 2482 | # dead end, mark it as such |
|
2484 | 2483 | newreplacements.append((n, ())) |
|
2485 | 2484 | for marker in markers: |
|
2486 | 2485 | nsuccs = marker[1] |
|
2487 | 2486 | newreplacements.append((n, nsuccs)) |
|
2488 | 2487 | for nsucc in nsuccs: |
|
2489 | 2488 | if nsucc not in seensuccs: |
|
2490 | 2489 | seensuccs.add(nsucc) |
|
2491 | 2490 | succstocheck.append(nsucc) |
|
2492 | 2491 | |
|
2493 | 2492 | return newreplacements |
|
2494 | 2493 | |
|
2495 | 2494 | |
|
2496 | 2495 | def processreplacement(state): |
|
2497 | 2496 | """process the list of replacements to return |
|
2498 | 2497 | |
|
2499 | 2498 | 1) the final mapping between original and created nodes |
|
2500 | 2499 | 2) the list of temporary node created by histedit |
|
2501 | 2500 | 3) the list of new commit created by histedit""" |
|
2502 | 2501 | replacements = adjustreplacementsfrommarkers(state.repo, state.replacements) |
|
2503 | 2502 | allsuccs = set() |
|
2504 | 2503 | replaced = set() |
|
2505 | 2504 | fullmapping = {} |
|
2506 | 2505 | # initialize basic set |
|
2507 | 2506 | # fullmapping records all operations recorded in replacement |
|
2508 | 2507 | for rep in replacements: |
|
2509 | 2508 | allsuccs.update(rep[1]) |
|
2510 | 2509 | replaced.add(rep[0]) |
|
2511 | 2510 | fullmapping.setdefault(rep[0], set()).update(rep[1]) |
|
2512 | 2511 | new = allsuccs - replaced |
|
2513 | 2512 | tmpnodes = allsuccs & replaced |
|
2514 | 2513 | # Reduce content fullmapping into direct relation between original nodes |
|
2515 | 2514 | # and final node created during history edition |
|
2516 | 2515 | # Dropped changeset are replaced by an empty list |
|
2517 | 2516 | toproceed = set(fullmapping) |
|
2518 | 2517 | final = {} |
|
2519 | 2518 | while toproceed: |
|
2520 | 2519 | for x in list(toproceed): |
|
2521 | 2520 | succs = fullmapping[x] |
|
2522 | 2521 | for s in list(succs): |
|
2523 | 2522 | if s in toproceed: |
|
2524 | 2523 | # non final node with unknown closure |
|
2525 | 2524 | # We can't process this now |
|
2526 | 2525 | break |
|
2527 | 2526 | elif s in final: |
|
2528 | 2527 | # non final node, replace with closure |
|
2529 | 2528 | succs.remove(s) |
|
2530 | 2529 | succs.update(final[s]) |
|
2531 | 2530 | else: |
|
2532 | 2531 | final[x] = succs |
|
2533 | 2532 | toproceed.remove(x) |
|
2534 | 2533 | # remove tmpnodes from final mapping |
|
2535 | 2534 | for n in tmpnodes: |
|
2536 | 2535 | del final[n] |
|
2537 | 2536 | # we expect all changes involved in final to exist in the repo |
|
2538 | 2537 | # turn `final` into list (topologically sorted) |
|
2539 | 2538 | get_rev = state.repo.changelog.index.get_rev |
|
2540 | 2539 | for prec, succs in final.items(): |
|
2541 | 2540 | final[prec] = sorted(succs, key=get_rev) |
|
2542 | 2541 | |
|
2543 | 2542 | # computed topmost element (necessary for bookmark) |
|
2544 | 2543 | if new: |
|
2545 | 2544 | newtopmost = sorted(new, key=state.repo.changelog.rev)[-1] |
|
2546 | 2545 | elif not final: |
|
2547 | 2546 | # Nothing rewritten at all. we won't need `newtopmost` |
|
2548 | 2547 | # It is the same as `oldtopmost` and `processreplacement` know it |
|
2549 | 2548 | newtopmost = None |
|
2550 | 2549 | else: |
|
2551 | 2550 | # every body died. The newtopmost is the parent of the root. |
|
2552 | 2551 | r = state.repo.changelog.rev |
|
2553 | 2552 | newtopmost = state.repo[sorted(final, key=r)[0]].p1().node() |
|
2554 | 2553 | |
|
2555 | 2554 | return final, tmpnodes, new, newtopmost |
|
2556 | 2555 | |
|
2557 | 2556 | |
|
2558 | 2557 | def movetopmostbookmarks(repo, oldtopmost, newtopmost): |
|
2559 | 2558 | """Move bookmark from oldtopmost to newly created topmost |
|
2560 | 2559 | |
|
2561 | 2560 | This is arguably a feature and we may only want that for the active |
|
2562 | 2561 | bookmark. But the behavior is kept compatible with the old version for now. |
|
2563 | 2562 | """ |
|
2564 | 2563 | if not oldtopmost or not newtopmost: |
|
2565 | 2564 | return |
|
2566 | 2565 | oldbmarks = repo.nodebookmarks(oldtopmost) |
|
2567 | 2566 | if oldbmarks: |
|
2568 | 2567 | with repo.lock(), repo.transaction(b'histedit') as tr: |
|
2569 | 2568 | marks = repo._bookmarks |
|
2570 | 2569 | changes = [] |
|
2571 | 2570 | for name in oldbmarks: |
|
2572 | 2571 | changes.append((name, newtopmost)) |
|
2573 | 2572 | marks.applychanges(repo, tr, changes) |
|
2574 | 2573 | |
|
2575 | 2574 | |
|
2576 | 2575 | def cleanupnode(ui, repo, nodes, nobackup=False): |
|
2577 | 2576 | """strip a group of nodes from the repository |
|
2578 | 2577 | |
|
2579 | 2578 | The set of node to strip may contains unknown nodes.""" |
|
2580 | 2579 | with repo.lock(): |
|
2581 | 2580 | # do not let filtering get in the way of the cleanse |
|
2582 | 2581 | # we should probably get rid of obsolescence marker created during the |
|
2583 | 2582 | # histedit, but we currently do not have such information. |
|
2584 | 2583 | repo = repo.unfiltered() |
|
2585 | 2584 | # Find all nodes that need to be stripped |
|
2586 | 2585 | # (we use %lr instead of %ln to silently ignore unknown items) |
|
2587 | 2586 | has_node = repo.changelog.index.has_node |
|
2588 | 2587 | nodes = sorted(n for n in nodes if has_node(n)) |
|
2589 | 2588 | roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)] |
|
2590 | 2589 | if roots: |
|
2591 | 2590 | backup = not nobackup |
|
2592 | 2591 | repair.strip(ui, repo, roots, backup=backup) |
|
2593 | 2592 | |
|
2594 | 2593 | |
|
2595 | 2594 | def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs): |
|
2596 | 2595 | if isinstance(nodelist, bytes): |
|
2597 | 2596 | nodelist = [nodelist] |
|
2598 | 2597 | state = histeditstate(repo) |
|
2599 | 2598 | if state.inprogress(): |
|
2600 | 2599 | state.read() |
|
2601 | 2600 | histedit_nodes = { |
|
2602 | 2601 | action.node for action in state.actions if action.node |
|
2603 | 2602 | } |
|
2604 | 2603 | common_nodes = histedit_nodes & set(nodelist) |
|
2605 | 2604 | if common_nodes: |
|
2606 | 2605 | raise error.Abort( |
|
2607 | 2606 | _(b"histedit in progress, can't strip %s") |
|
2608 | 2607 | % b', '.join(node.short(x) for x in common_nodes) |
|
2609 | 2608 | ) |
|
2610 | 2609 | return orig(ui, repo, nodelist, *args, **kwargs) |
|
2611 | 2610 | |
|
2612 | 2611 | |
|
2613 | 2612 | extensions.wrapfunction(repair, b'strip', stripwrapper) |
|
2614 | 2613 | |
|
2615 | 2614 | |
|
2616 | 2615 | def summaryhook(ui, repo): |
|
2617 | 2616 | state = histeditstate(repo) |
|
2618 | 2617 | if not state.inprogress(): |
|
2619 | 2618 | return |
|
2620 | 2619 | state.read() |
|
2621 | 2620 | if state.actions: |
|
2622 | 2621 | # i18n: column positioning for "hg summary" |
|
2623 | 2622 | ui.write( |
|
2624 | 2623 | _(b'hist: %s (histedit --continue)\n') |
|
2625 | 2624 | % ( |
|
2626 | 2625 | ui.label(_(b'%d remaining'), b'histedit.remaining') |
|
2627 | 2626 | % len(state.actions) |
|
2628 | 2627 | ) |
|
2629 | 2628 | ) |
|
2630 | 2629 | |
|
2631 | 2630 | |
|
2632 | 2631 | def extsetup(ui): |
|
2633 | 2632 | cmdutil.summaryhooks.add(b'histedit', summaryhook) |
|
2634 | 2633 | statemod.addunfinished( |
|
2635 | 2634 | b'histedit', |
|
2636 | 2635 | fname=b'histedit-state', |
|
2637 | 2636 | allowcommit=True, |
|
2638 | 2637 | continueflag=True, |
|
2639 | 2638 | abortfunc=hgaborthistedit, |
|
2640 | 2639 | ) |
@@ -1,2276 +1,2275 @@
|
1 | 1 | # rebase.py - rebasing feature for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''command to move sets of revisions to a different ancestor |
|
9 | 9 | |
|
10 | 10 | This extension lets you rebase changesets in an existing Mercurial |
|
11 | 11 | repository. |
|
12 | 12 | |
|
13 | 13 | For more information: |
|
14 | 14 | https://mercurial-scm.org/wiki/RebaseExtension |
|
15 | 15 | ''' |
|
16 | 16 | |
|
17 | 17 | from __future__ import absolute_import |
|
18 | 18 | |
|
19 | 19 | import errno |
|
20 | 20 | import os |
|
21 | 21 | |
|
22 | 22 | from mercurial.i18n import _ |
|
23 | 23 | from mercurial.node import ( |
|
24 | 24 | nullrev, |
|
25 | 25 | short, |
|
26 | 26 | ) |
|
27 | 27 | from mercurial.pycompat import open |
|
28 | 28 | from mercurial import ( |
|
29 | 29 | bookmarks, |
|
30 | 30 | cmdutil, |
|
31 | 31 | commands, |
|
32 | 32 | copies, |
|
33 | 33 | destutil, |
|
34 | 34 | dirstateguard, |
|
35 | 35 | error, |
|
36 | 36 | extensions, |
|
37 | hg, | |
|
38 | 37 | merge as mergemod, |
|
39 | 38 | mergestate as mergestatemod, |
|
40 | 39 | mergeutil, |
|
41 | 40 | node as nodemod, |
|
42 | 41 | obsolete, |
|
43 | 42 | obsutil, |
|
44 | 43 | patch, |
|
45 | 44 | phases, |
|
46 | 45 | pycompat, |
|
47 | 46 | registrar, |
|
48 | 47 | repair, |
|
49 | 48 | revset, |
|
50 | 49 | revsetlang, |
|
51 | 50 | rewriteutil, |
|
52 | 51 | scmutil, |
|
53 | 52 | smartset, |
|
54 | 53 | state as statemod, |
|
55 | 54 | util, |
|
56 | 55 | ) |
|
57 | 56 | |
|
58 | 57 | # The following constants are used throughout the rebase module. The ordering of |
|
59 | 58 | # their values must be maintained. |
|
60 | 59 | |
|
61 | 60 | # Indicates that a revision needs to be rebased |
|
62 | 61 | revtodo = -1 |
|
63 | 62 | revtodostr = b'-1' |
|
64 | 63 | |
|
65 | 64 | # legacy revstates no longer needed in current code |
|
66 | 65 | # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned |
|
67 | 66 | legacystates = {b'-2', b'-3', b'-4', b'-5'} |
|
68 | 67 | |
|
69 | 68 | cmdtable = {} |
|
70 | 69 | command = registrar.command(cmdtable) |
|
71 | 70 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
72 | 71 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
73 | 72 | # be specifying the version(s) of Mercurial they are tested with, or |
|
74 | 73 | # leave the attribute unspecified. |
|
75 | 74 | testedwith = b'ships-with-hg-core' |
|
76 | 75 | |
|
77 | 76 | |
|
78 | 77 | def _nothingtorebase(): |
|
79 | 78 | return 1 |
|
80 | 79 | |
|
81 | 80 | |
|
82 | 81 | def _savegraft(ctx, extra): |
|
83 | 82 | s = ctx.extra().get(b'source', None) |
|
84 | 83 | if s is not None: |
|
85 | 84 | extra[b'source'] = s |
|
86 | 85 | s = ctx.extra().get(b'intermediate-source', None) |
|
87 | 86 | if s is not None: |
|
88 | 87 | extra[b'intermediate-source'] = s |
|
89 | 88 | |
|
90 | 89 | |
|
91 | 90 | def _savebranch(ctx, extra): |
|
92 | 91 | extra[b'branch'] = ctx.branch() |
|
93 | 92 | |
|
94 | 93 | |
|
95 | 94 | def _destrebase(repo, sourceset, destspace=None): |
|
96 | 95 | """small wrapper around destmerge to pass the right extra args |
|
97 | 96 | |
|
98 | 97 | Please wrap destutil.destmerge instead.""" |
|
99 | 98 | return destutil.destmerge( |
|
100 | 99 | repo, |
|
101 | 100 | action=b'rebase', |
|
102 | 101 | sourceset=sourceset, |
|
103 | 102 | onheadcheck=False, |
|
104 | 103 | destspace=destspace, |
|
105 | 104 | ) |
|
106 | 105 | |
|
107 | 106 | |
|
108 | 107 | revsetpredicate = registrar.revsetpredicate() |
|
109 | 108 | |
|
110 | 109 | |
|
111 | 110 | @revsetpredicate(b'_destrebase') |
|
112 | 111 | def _revsetdestrebase(repo, subset, x): |
|
113 | 112 | # ``_rebasedefaultdest()`` |
|
114 | 113 | |
|
115 | 114 | # default destination for rebase. |
|
116 | 115 | # # XXX: Currently private because I expect the signature to change. |
|
117 | 116 | # # XXX: - bailing out in case of ambiguity vs returning all data. |
|
118 | 117 | # i18n: "_rebasedefaultdest" is a keyword |
|
119 | 118 | sourceset = None |
|
120 | 119 | if x is not None: |
|
121 | 120 | sourceset = revset.getset(repo, smartset.fullreposet(repo), x) |
|
122 | 121 | return subset & smartset.baseset([_destrebase(repo, sourceset)]) |
|
123 | 122 | |
|
124 | 123 | |
|
125 | 124 | @revsetpredicate(b'_destautoorphanrebase') |
|
126 | 125 | def _revsetdestautoorphanrebase(repo, subset, x): |
|
127 | 126 | # ``_destautoorphanrebase()`` |
|
128 | 127 | |
|
129 | 128 | # automatic rebase destination for a single orphan revision. |
|
130 | 129 | unfi = repo.unfiltered() |
|
131 | 130 | obsoleted = unfi.revs(b'obsolete()') |
|
132 | 131 | |
|
133 | 132 | src = revset.getset(repo, subset, x).first() |
|
134 | 133 | |
|
135 | 134 | # Empty src or already obsoleted - Do not return a destination |
|
136 | 135 | if not src or src in obsoleted: |
|
137 | 136 | return smartset.baseset() |
|
138 | 137 | dests = destutil.orphanpossibledestination(repo, src) |
|
139 | 138 | if len(dests) > 1: |
|
140 | 139 | raise error.Abort( |
|
141 | 140 | _(b"ambiguous automatic rebase: %r could end up on any of %r") |
|
142 | 141 | % (src, dests) |
|
143 | 142 | ) |
|
144 | 143 | # We have zero or one destination, so we can just return here. |
|
145 | 144 | return smartset.baseset(dests) |
|
146 | 145 | |
|
147 | 146 | |
|
148 | 147 | def _ctxdesc(ctx): |
|
149 | 148 | """short description for a context""" |
|
150 | 149 | desc = b'%d:%s "%s"' % ( |
|
151 | 150 | ctx.rev(), |
|
152 | 151 | ctx, |
|
153 | 152 | ctx.description().split(b'\n', 1)[0], |
|
154 | 153 | ) |
|
155 | 154 | repo = ctx.repo() |
|
156 | 155 | names = [] |
|
157 | 156 | for nsname, ns in pycompat.iteritems(repo.names): |
|
158 | 157 | if nsname == b'branches': |
|
159 | 158 | continue |
|
160 | 159 | names.extend(ns.names(repo, ctx.node())) |
|
161 | 160 | if names: |
|
162 | 161 | desc += b' (%s)' % b' '.join(names) |
|
163 | 162 | return desc |
|
164 | 163 | |
|
165 | 164 | |
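Given the format string and name lookup above, ``_ctxdesc`` produces one-line summaries such as (values illustrative)::

    3:7c2fd3b9020c "Add delta" (tip mybookmark)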
|
166 | 165 | class rebaseruntime(object): |
|
167 | 166 | """This class is a container for rebase runtime state""" |
|
168 | 167 | |
|
169 | 168 | def __init__(self, repo, ui, inmemory=False, dryrun=False, opts=None): |
|
170 | 169 | if opts is None: |
|
171 | 170 | opts = {} |
|
172 | 171 | |
|
173 | 172 | # prepared: whether we have rebasestate prepared or not. Currently it |
|
174 | 173 | # decides whether "self.repo" is unfiltered or not. |
|
175 | 174 | # The rebasestate has explicit hash to hash instructions not depending |
|
176 | 175 | # on visibility. If rebasestate exists (in-memory or on-disk), use |
|
177 | 176 | # unfiltered repo to avoid visibility issues. |
|
178 | 177 | # Before knowing rebasestate (i.e. when starting a new rebase (not |
|
179 | 178 | # --continue or --abort)), the original repo should be used so |
|
180 | 179 | # visibility-dependent revsets are correct. |
|
181 | 180 | self.prepared = False |
|
182 | 181 | self.resume = False |
|
183 | 182 | self._repo = repo |
|
184 | 183 | |
|
185 | 184 | self.ui = ui |
|
186 | 185 | self.opts = opts |
|
187 | 186 | self.originalwd = None |
|
188 | 187 | self.external = nullrev |
|
189 | 188 | # Mapping from each old revision id to either its new rebased revision |

190 | 189 | # or a marker for what needs to be done with the old revision. This |

191 | 190 | # state dict holds most of the rebase progress state. |
|
192 | 191 | self.state = {} |
|
193 | 192 | self.activebookmark = None |
|
194 | 193 | self.destmap = {} |
|
195 | 194 | self.skipped = set() |
|
196 | 195 | |
|
197 | 196 | self.collapsef = opts.get(b'collapse', False) |
|
198 | 197 | self.collapsemsg = cmdutil.logmessage(ui, opts) |
|
199 | 198 | self.date = opts.get(b'date', None) |
|
200 | 199 | |
|
201 | 200 | e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion |
|
202 | 201 | self.extrafns = [_savegraft] |
|
203 | 202 | if e: |
|
204 | 203 | self.extrafns = [e] |
|
205 | 204 | |
|
206 | 205 | self.backupf = ui.configbool(b'rewrite', b'backup-bundle') |
|
207 | 206 | self.keepf = opts.get(b'keep', False) |
|
208 | 207 | self.keepbranchesf = opts.get(b'keepbranches', False) |
|
209 | 208 | self.skipemptysuccessorf = rewriteutil.skip_empty_successor( |
|
210 | 209 | repo.ui, b'rebase' |
|
211 | 210 | ) |
|
212 | 211 | self.obsoletenotrebased = {} |
|
213 | 212 | self.obsoletewithoutsuccessorindestination = set() |
|
214 | 213 | self.inmemory = inmemory |
|
215 | 214 | self.dryrun = dryrun |
|
216 | 215 | self.stateobj = statemod.cmdstate(repo, b'rebasestate') |
|
217 | 216 | |
|
218 | 217 | @property |
|
219 | 218 | def repo(self): |
|
220 | 219 | if self.prepared: |
|
221 | 220 | return self._repo.unfiltered() |
|
222 | 221 | else: |
|
223 | 222 | return self._repo |
|
224 | 223 | |
|
225 | 224 | def storestatus(self, tr=None): |
|
226 | 225 | """Store the current status to allow recovery""" |
|
227 | 226 | if tr: |
|
228 | 227 | tr.addfilegenerator( |
|
229 | 228 | b'rebasestate', |
|
230 | 229 | (b'rebasestate',), |
|
231 | 230 | self._writestatus, |
|
232 | 231 | location=b'plain', |
|
233 | 232 | ) |
|
234 | 233 | else: |
|
235 | 234 | with self.repo.vfs(b"rebasestate", b"w") as f: |
|
236 | 235 | self._writestatus(f) |
|
237 | 236 | |
|
238 | 237 | def _writestatus(self, f): |
|
239 | 238 | repo = self.repo |
|
240 | 239 | assert repo.filtername is None |
|
241 | 240 | f.write(repo[self.originalwd].hex() + b'\n') |
|
242 | 241 | # was "dest". we now write dest per src root below. |
|
243 | 242 | f.write(b'\n') |
|
244 | 243 | f.write(repo[self.external].hex() + b'\n') |
|
245 | 244 | f.write(b'%d\n' % int(self.collapsef)) |
|
246 | 245 | f.write(b'%d\n' % int(self.keepf)) |
|
247 | 246 | f.write(b'%d\n' % int(self.keepbranchesf)) |
|
248 | 247 | f.write(b'%s\n' % (self.activebookmark or b'')) |
|
249 | 248 | destmap = self.destmap |
|
250 | 249 | for d, v in pycompat.iteritems(self.state): |
|
251 | 250 | oldrev = repo[d].hex() |
|
252 | 251 | if v >= 0: |
|
253 | 252 | newrev = repo[v].hex() |
|
254 | 253 | else: |
|
255 | 254 | newrev = b"%d" % v |
|
256 | 255 | destnode = repo[destmap[d]].hex() |
|
257 | 256 | f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode)) |
|
258 | 257 | repo.ui.debug(b'rebase status stored\n') |
|
259 | 258 | |
|
260 | 259 | def restorestatus(self): |
|
261 | 260 | """Restore a previously stored status""" |
|
262 | 261 | if not self.stateobj.exists(): |
|
263 | 262 | cmdutil.wrongtooltocontinue(self.repo, _(b'rebase')) |
|
264 | 263 | |
|
265 | 264 | data = self._read() |
|
266 | 265 | self.repo.ui.debug(b'rebase status resumed\n') |
|
267 | 266 | |
|
268 | 267 | self.originalwd = data[b'originalwd'] |
|
269 | 268 | self.destmap = data[b'destmap'] |
|
270 | 269 | self.state = data[b'state'] |
|
271 | 270 | self.skipped = data[b'skipped'] |
|
272 | 271 | self.collapsef = data[b'collapse'] |
|
273 | 272 | self.keepf = data[b'keep'] |
|
274 | 273 | self.keepbranchesf = data[b'keepbranches'] |
|
275 | 274 | self.external = data[b'external'] |
|
276 | 275 | self.activebookmark = data[b'activebookmark'] |
|
277 | 276 | |
|
278 | 277 | def _read(self): |
|
279 | 278 | self.prepared = True |
|
280 | 279 | repo = self.repo |
|
281 | 280 | assert repo.filtername is None |
|
282 | 281 | data = { |
|
283 | 282 | b'keepbranches': None, |
|
284 | 283 | b'collapse': None, |
|
285 | 284 | b'activebookmark': None, |
|
286 | 285 | b'external': nullrev, |
|
287 | 286 | b'keep': None, |
|
288 | 287 | b'originalwd': None, |
|
289 | 288 | } |
|
290 | 289 | legacydest = None |
|
291 | 290 | state = {} |
|
292 | 291 | destmap = {} |
|
293 | 292 | |
|
294 | 293 | if True: |
|
295 | 294 | f = repo.vfs(b"rebasestate") |
|
296 | 295 | for i, l in enumerate(f.read().splitlines()): |
|
297 | 296 | if i == 0: |
|
298 | 297 | data[b'originalwd'] = repo[l].rev() |
|
299 | 298 | elif i == 1: |
|
300 | 299 | # this line should be empty in newer versions, but legacy |

301 | 300 | # clients may still use it |
|
302 | 301 | if l: |
|
303 | 302 | legacydest = repo[l].rev() |
|
304 | 303 | elif i == 2: |
|
305 | 304 | data[b'external'] = repo[l].rev() |
|
306 | 305 | elif i == 3: |
|
307 | 306 | data[b'collapse'] = bool(int(l)) |
|
308 | 307 | elif i == 4: |
|
309 | 308 | data[b'keep'] = bool(int(l)) |
|
310 | 309 | elif i == 5: |
|
311 | 310 | data[b'keepbranches'] = bool(int(l)) |
|
312 | 311 | elif i == 6 and not (len(l) == 81 and b':' in l): |
|
313 | 312 | # line 6 is a recent addition, so for backwards |
|
314 | 313 | # compatibility check that the line doesn't look like the |
|
315 | 314 | # oldrev:newrev lines |
|
316 | 315 | data[b'activebookmark'] = l |
|
317 | 316 | else: |
|
318 | 317 | args = l.split(b':') |
|
319 | 318 | oldrev = repo[args[0]].rev() |
|
320 | 319 | newrev = args[1] |
|
321 | 320 | if newrev in legacystates: |
|
322 | 321 | continue |
|
323 | 322 | if len(args) > 2: |
|
324 | 323 | destrev = repo[args[2]].rev() |
|
325 | 324 | else: |
|
326 | 325 | destrev = legacydest |
|
327 | 326 | destmap[oldrev] = destrev |
|
328 | 327 | if newrev == revtodostr: |
|
329 | 328 | state[oldrev] = revtodo |
|
330 | 329 | # Legacy compat special case |
|
331 | 330 | else: |
|
332 | 331 | state[oldrev] = repo[newrev].rev() |
|
333 | 332 | |
|
334 | 333 | if data[b'keepbranches'] is None: |
|
335 | 334 | raise error.Abort(_(b'.hg/rebasestate is incomplete')) |
|
336 | 335 | |
|
337 | 336 | data[b'destmap'] = destmap |
|
338 | 337 | data[b'state'] = state |
|
339 | 338 | skipped = set() |
|
340 | 339 | # recompute the set of skipped revs |
|
341 | 340 | if not data[b'collapse']: |
|
342 | 341 | seen = set(destmap.values()) |
|
343 | 342 | for old, new in sorted(state.items()): |
|
344 | 343 | if new != revtodo and new in seen: |
|
345 | 344 | skipped.add(old) |
|
346 | 345 | seen.add(new) |
|
347 | 346 | data[b'skipped'] = skipped |
|
348 | 347 | repo.ui.debug( |
|
349 | 348 | b'computed skipped revs: %s\n' |
|
350 | 349 | % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'') |
|
351 | 350 | ) |
|
352 | 351 | |
|
353 | 352 | return data |
|
354 | 353 | |
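Reading ``_writestatus`` and ``_read`` together, ``.hg/rebasestate`` is a seven-line header followed by one ``old:new:dest`` entry per source revision. An illustrative layout (parenthesized notes are not part of the file; real nodes are full 40-character hex)::

    <originalwd node>
    <empty: legacy single-destination slot>
    <external parent node>
    0                  (collapse flag)
    0                  (keep flag)
    0                  (keepbranches flag)
    mybookmark         (active bookmark, may be empty)
    <old node>:<new node, or -1 if still to do>:<destination node>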
|
355 | 354 | def _handleskippingobsolete(self, obsoleterevs, destmap): |
|
356 | 355 | """Compute structures necessary for skipping obsolete revisions |
|
357 | 356 | |
|
358 | 357 | obsoleterevs: iterable of all obsolete revisions in rebaseset |
|
359 | 358 | destmap: {srcrev: destrev} destination revisions |
|
360 | 359 | """ |
|
361 | 360 | self.obsoletenotrebased = {} |
|
362 | 361 | if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'): |
|
363 | 362 | return |
|
364 | 363 | obsoleteset = set(obsoleterevs) |
|
365 | 364 | ( |
|
366 | 365 | self.obsoletenotrebased, |
|
367 | 366 | self.obsoletewithoutsuccessorindestination, |
|
368 | 367 | obsoleteextinctsuccessors, |
|
369 | 368 | ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap) |
|
370 | 369 | skippedset = set(self.obsoletenotrebased) |
|
371 | 370 | skippedset.update(self.obsoletewithoutsuccessorindestination) |
|
372 | 371 | skippedset.update(obsoleteextinctsuccessors) |
|
373 | 372 | _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset) |
|
374 | 373 | |
|
375 | 374 | def _prepareabortorcontinue( |
|
376 | 375 | self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False |
|
377 | 376 | ): |
|
378 | 377 | self.resume = True |
|
379 | 378 | try: |
|
380 | 379 | self.restorestatus() |
|
381 | 380 | self.collapsemsg = restorecollapsemsg(self.repo, isabort) |
|
382 | 381 | except error.RepoLookupError: |
|
383 | 382 | if isabort: |
|
384 | 383 | clearstatus(self.repo) |
|
385 | 384 | clearcollapsemsg(self.repo) |
|
386 | 385 | self.repo.ui.warn( |
|
387 | 386 | _( |
|
388 | 387 | b'rebase aborted (no revision is removed,' |
|
389 | 388 | b' only broken state is cleared)\n' |
|
390 | 389 | ) |
|
391 | 390 | ) |
|
392 | 391 | return 0 |
|
393 | 392 | else: |
|
394 | 393 | msg = _(b'cannot continue inconsistent rebase') |
|
395 | 394 | hint = _(b'use "hg rebase --abort" to clear broken state') |
|
396 | 395 | raise error.Abort(msg, hint=hint) |
|
397 | 396 | |
|
398 | 397 | if isabort: |
|
399 | 398 | backup = backup and self.backupf |
|
400 | 399 | return self._abort( |
|
401 | 400 | backup=backup, |
|
402 | 401 | suppwarns=suppwarns, |
|
403 | 402 | dryrun=dryrun, |
|
404 | 403 | confirm=confirm, |
|
405 | 404 | ) |
|
406 | 405 | |
|
407 | 406 | def _preparenewrebase(self, destmap): |
|
408 | 407 | if not destmap: |
|
409 | 408 | return _nothingtorebase() |
|
410 | 409 | |
|
411 | 410 | rebaseset = destmap.keys() |
|
412 | 411 | if not self.keepf: |
|
413 | 412 | try: |
|
414 | 413 | rewriteutil.precheck(self.repo, rebaseset, action=b'rebase') |
|
415 | 414 | except error.Abort as e: |
|
416 | 415 | if e.hint is None: |
|
417 | 416 | e.hint = _(b'use --keep to keep original changesets') |
|
418 | 417 | raise e |
|
419 | 418 | |
|
420 | 419 | result = buildstate(self.repo, destmap, self.collapsef) |
|
421 | 420 | |
|
422 | 421 | if not result: |
|
423 | 422 | # Empty state built, nothing to rebase |
|
424 | 423 | self.ui.status(_(b'nothing to rebase\n')) |
|
425 | 424 | return _nothingtorebase() |
|
426 | 425 | |
|
427 | 426 | (self.originalwd, self.destmap, self.state) = result |
|
428 | 427 | if self.collapsef: |
|
429 | 428 | dests = set(self.destmap.values()) |
|
430 | 429 | if len(dests) != 1: |
|
431 | 430 | raise error.Abort( |
|
432 | 431 | _(b'--collapse does not work with multiple destinations') |
|
433 | 432 | ) |
|
434 | 433 | destrev = next(iter(dests)) |
|
435 | 434 | destancestors = self.repo.changelog.ancestors( |
|
436 | 435 | [destrev], inclusive=True |
|
437 | 436 | ) |
|
438 | 437 | self.external = externalparent(self.repo, self.state, destancestors) |
|
439 | 438 | |
|
440 | 439 | for destrev in sorted(set(destmap.values())): |
|
441 | 440 | dest = self.repo[destrev] |
|
442 | 441 | if dest.closesbranch() and not self.keepbranchesf: |
|
443 | 442 | self.ui.status(_(b'reopening closed branch head %s\n') % dest) |
|
444 | 443 | |
|
445 | 444 | self.prepared = True |
|
446 | 445 | |
|
447 | 446 | def _assignworkingcopy(self): |
|
448 | 447 | if self.inmemory: |
|
449 | 448 | from mercurial.context import overlayworkingctx |
|
450 | 449 | |
|
451 | 450 | self.wctx = overlayworkingctx(self.repo) |
|
452 | 451 | self.repo.ui.debug(b"rebasing in memory\n") |
|
453 | 452 | else: |
|
454 | 453 | self.wctx = self.repo[None] |
|
455 | 454 | self.repo.ui.debug(b"rebasing on disk\n") |
|
456 | 455 | self.repo.ui.log( |
|
457 | 456 | b"rebase", |
|
458 | 457 | b"using in-memory rebase: %r\n", |
|
459 | 458 | self.inmemory, |
|
460 | 459 | rebase_imm_used=self.inmemory, |
|
461 | 460 | ) |
|
462 | 461 | |
|
463 | 462 | def _performrebase(self, tr): |
|
464 | 463 | self._assignworkingcopy() |
|
465 | 464 | repo, ui = self.repo, self.ui |
|
466 | 465 | if self.keepbranchesf: |
|
467 | 466 | # insert _savebranch at the start of extrafns so if |
|
468 | 467 | # there's a user-provided extrafn it can clobber branch if |
|
469 | 468 | # desired |
|
470 | 469 | self.extrafns.insert(0, _savebranch) |
|
471 | 470 | if self.collapsef: |
|
472 | 471 | branches = set() |
|
473 | 472 | for rev in self.state: |
|
474 | 473 | branches.add(repo[rev].branch()) |
|
475 | 474 | if len(branches) > 1: |
|
476 | 475 | raise error.Abort( |
|
477 | 476 | _(b'cannot collapse multiple named branches') |
|
478 | 477 | ) |
|
479 | 478 | |
|
480 | 479 | # Calculate self.obsoletenotrebased |
|
481 | 480 | obsrevs = _filterobsoleterevs(self.repo, self.state) |
|
482 | 481 | self._handleskippingobsolete(obsrevs, self.destmap) |
|
483 | 482 | |
|
484 | 483 | # Keep track of the active bookmarks in order to reset them later |
|
485 | 484 | self.activebookmark = self.activebookmark or repo._activebookmark |
|
486 | 485 | if self.activebookmark: |
|
487 | 486 | bookmarks.deactivate(repo) |
|
488 | 487 | |
|
489 | 488 | # Store the state before we begin so users can run 'hg rebase --abort' |
|
490 | 489 | # if we fail before the transaction closes. |
|
491 | 490 | self.storestatus() |
|
492 | 491 | if tr: |
|
493 | 492 | # When using single transaction, store state when transaction |
|
494 | 493 | # commits. |
|
495 | 494 | self.storestatus(tr) |
|
496 | 495 | |
|
497 | 496 | cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo] |
|
498 | 497 | p = repo.ui.makeprogress( |
|
499 | 498 | _(b"rebasing"), unit=_(b'changesets'), total=len(cands) |
|
500 | 499 | ) |
|
501 | 500 | |
|
502 | 501 | def progress(ctx): |
|
503 | 502 | p.increment(item=(b"%d:%s" % (ctx.rev(), ctx))) |
|
504 | 503 | |
|
505 | 504 | allowdivergence = self.ui.configbool( |
|
506 | 505 | b'experimental', b'evolution.allowdivergence' |
|
507 | 506 | ) |
|
508 | 507 | for subset in sortsource(self.destmap): |
|
509 | 508 | sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset) |
|
510 | 509 | if not allowdivergence: |
|
511 | 510 | sortedrevs -= self.repo.revs( |
|
512 | 511 | b'descendants(%ld) and not %ld', |
|
513 | 512 | self.obsoletewithoutsuccessorindestination, |
|
514 | 513 | self.obsoletewithoutsuccessorindestination, |
|
515 | 514 | ) |
|
516 | 515 | for rev in sortedrevs: |
|
517 | 516 | self._rebasenode(tr, rev, allowdivergence, progress) |
|
518 | 517 | p.complete() |
|
519 | 518 | ui.note(_(b'rebase merging completed\n')) |
|
520 | 519 | |
|
521 | 520 | def _concludenode(self, rev, editor, commitmsg=None): |
|
522 | 521 | '''Commit the wd changes with parents p1 and p2. |
|
523 | 522 | |
|
524 | 523 | Reuse commit info from rev but also store useful information in extra. |
|
525 | 524 | Return node of committed revision.''' |
|
526 | 525 | repo = self.repo |
|
527 | 526 | ctx = repo[rev] |
|
528 | 527 | if commitmsg is None: |
|
529 | 528 | commitmsg = ctx.description() |
|
530 | 529 | date = self.date |
|
531 | 530 | if date is None: |
|
532 | 531 | date = ctx.date() |
|
533 | 532 | extra = {b'rebase_source': ctx.hex()} |
|
534 | 533 | for c in self.extrafns: |
|
535 | 534 | c(ctx, extra) |
|
536 | 535 | destphase = max(ctx.phase(), phases.draft) |
|
537 | 536 | overrides = { |
|
538 | 537 | (b'phases', b'new-commit'): destphase, |
|
539 | 538 | (b'ui', b'allowemptycommit'): not self.skipemptysuccessorf, |
|
540 | 539 | } |
|
541 | 540 | with repo.ui.configoverride(overrides, b'rebase'): |
|
542 | 541 | if self.inmemory: |
|
543 | 542 | newnode = commitmemorynode( |
|
544 | 543 | repo, |
|
545 | 544 | wctx=self.wctx, |
|
546 | 545 | extra=extra, |
|
547 | 546 | commitmsg=commitmsg, |
|
548 | 547 | editor=editor, |
|
549 | 548 | user=ctx.user(), |
|
550 | 549 | date=date, |
|
551 | 550 | ) |
|
552 | 551 | else: |
|
553 | 552 | newnode = commitnode( |
|
554 | 553 | repo, |
|
555 | 554 | extra=extra, |
|
556 | 555 | commitmsg=commitmsg, |
|
557 | 556 | editor=editor, |
|
558 | 557 | user=ctx.user(), |
|
559 | 558 | date=date, |
|
560 | 559 | ) |
|
561 | 560 | |
|
562 | 561 | return newnode |
|
563 | 562 | |
|
564 | 563 | def _rebasenode(self, tr, rev, allowdivergence, progressfn): |
|
565 | 564 | repo, ui, opts = self.repo, self.ui, self.opts |
|
566 | 565 | ctx = repo[rev] |
|
567 | 566 | desc = _ctxdesc(ctx) |
|
568 | 567 | if self.state[rev] == rev: |
|
569 | 568 | ui.status(_(b'already rebased %s\n') % desc) |
|
570 | 569 | elif ( |
|
571 | 570 | not allowdivergence |
|
572 | 571 | and rev in self.obsoletewithoutsuccessorindestination |
|
573 | 572 | ): |
|
574 | 573 | msg = ( |
|
575 | 574 | _( |
|
576 | 575 | b'note: not rebasing %s and its descendants as ' |
|
577 | 576 | b'this would cause divergence\n' |
|
578 | 577 | ) |
|
579 | 578 | % desc |
|
580 | 579 | ) |
|
581 | 580 | repo.ui.status(msg) |
|
582 | 581 | self.skipped.add(rev) |
|
583 | 582 | elif rev in self.obsoletenotrebased: |
|
584 | 583 | succ = self.obsoletenotrebased[rev] |
|
585 | 584 | if succ is None: |
|
586 | 585 | msg = _(b'note: not rebasing %s, it has no successor\n') % desc |
|
587 | 586 | else: |
|
588 | 587 | succdesc = _ctxdesc(repo[succ]) |
|
589 | 588 | msg = _( |
|
590 | 589 | b'note: not rebasing %s, already in destination as %s\n' |
|
591 | 590 | ) % (desc, succdesc) |
|
592 | 591 | repo.ui.status(msg) |
|
593 | 592 | # Make clearrebased aware state[rev] is not a true successor |
|
594 | 593 | self.skipped.add(rev) |
|
595 | 594 | # Record rev as moved to its desired destination in self.state. |
|
596 | 595 | # This helps bookmark and working parent movement. |
|
597 | 596 | dest = max( |
|
598 | 597 | adjustdest(repo, rev, self.destmap, self.state, self.skipped) |
|
599 | 598 | ) |
|
600 | 599 | self.state[rev] = dest |
|
601 | 600 | elif self.state[rev] == revtodo: |
|
602 | 601 | ui.status(_(b'rebasing %s\n') % desc) |
|
603 | 602 | progressfn(ctx) |
|
604 | 603 | p1, p2, base = defineparents( |
|
605 | 604 | repo, |
|
606 | 605 | rev, |
|
607 | 606 | self.destmap, |
|
608 | 607 | self.state, |
|
609 | 608 | self.skipped, |
|
610 | 609 | self.obsoletenotrebased, |
|
611 | 610 | ) |
|
612 | 611 | if self.resume and self.wctx.p1().rev() == p1: |
|
613 | 612 | repo.ui.debug(b'resuming interrupted rebase\n') |
|
614 | 613 | self.resume = False |
|
615 | 614 | else: |
|
616 | 615 | overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')} |
|
617 | 616 | with ui.configoverride(overrides, b'rebase'): |
|
618 | 617 | try: |
|
619 | 618 | rebasenode( |
|
620 | 619 | repo, |
|
621 | 620 | rev, |
|
622 | 621 | p1, |
|
623 | 622 | p2, |
|
624 | 623 | base, |
|
625 | 624 | self.collapsef, |
|
626 | 625 | wctx=self.wctx, |
|
627 | 626 | ) |
|
628 | 627 | except error.InMemoryMergeConflictsError: |
|
629 | 628 | if self.dryrun: |
|
630 | 629 | raise error.ConflictResolutionRequired(b'rebase') |
|
631 | 630 | if self.collapsef: |
|
632 | 631 | # TODO: Reflect the overlayworkingctx in |

633 | 632 | # the working copy here instead of re-raising, |

634 | 633 | # which retries the entire rebase operation. |
|
635 | 634 | raise |
|
636 | 635 | ui.status( |
|
637 | 636 | _( |
|
638 | 637 | b"hit merge conflicts; rebasing that " |
|
639 | 638 | b"commit again in the working copy\n" |
|
640 | 639 | ) |
|
641 | 640 | ) |
|
642 | 641 | cmdutil.bailifchanged(repo) |
|
643 | 642 | self.inmemory = False |
|
644 | 643 | self._assignworkingcopy() |
|
645 | 644 | mergemod.update(repo[p1], wc=self.wctx) |
|
646 | 645 | rebasenode( |
|
647 | 646 | repo, |
|
648 | 647 | rev, |
|
649 | 648 | p1, |
|
650 | 649 | p2, |
|
651 | 650 | base, |
|
652 | 651 | self.collapsef, |
|
653 | 652 | wctx=self.wctx, |
|
654 | 653 | ) |
|
655 | 654 | if not self.collapsef: |
|
656 | 655 | merging = p2 != nullrev |
|
657 | 656 | editform = cmdutil.mergeeditform(merging, b'rebase') |
|
658 | 657 | editor = cmdutil.getcommiteditor( |
|
659 | 658 | editform=editform, **pycompat.strkwargs(opts) |
|
660 | 659 | ) |
|
661 | 660 | # We need to set parents again here just in case we're continuing |
|
662 | 661 | # a rebase started with an old hg version (before 9c9cfecd4600), |
|
663 | 662 | # because those old versions would have left us with two dirstate |
|
664 | 663 | # parents, and we don't want to create a merge commit here (unless |
|
665 | 664 | # we're rebasing a merge commit). |
|
666 | 665 | self.wctx.setparents(repo[p1].node(), repo[p2].node()) |
|
667 | 666 | newnode = self._concludenode(rev, editor) |
|
668 | 667 | else: |
|
669 | 668 | # Skip commit if we are collapsing |
|
670 | 669 | newnode = None |
|
671 | 670 | # Update the state |
|
672 | 671 | if newnode is not None: |
|
673 | 672 | self.state[rev] = repo[newnode].rev() |
|
674 | 673 | ui.debug(b'rebased as %s\n' % short(newnode)) |
|
675 | 674 | if repo[newnode].isempty(): |
|
676 | 675 | ui.warn( |
|
677 | 676 | _( |
|
678 | 677 | b'note: created empty successor for %s, its ' |
|
679 | 678 | b'destination already has all its changes\n' |
|
680 | 679 | ) |
|
681 | 680 | % desc |
|
682 | 681 | ) |
|
683 | 682 | else: |
|
684 | 683 | if not self.collapsef: |
|
685 | 684 | ui.warn( |
|
686 | 685 | _( |
|
687 | 686 | b'note: not rebasing %s, its destination already ' |
|
688 | 687 | b'has all its changes\n' |
|
689 | 688 | ) |
|
690 | 689 | % desc |
|
691 | 690 | ) |
|
692 | 691 | self.skipped.add(rev) |
|
693 | 692 | self.state[rev] = p1 |
|
694 | 693 | ui.debug(b'next revision set to %d\n' % p1) |
|
695 | 694 | else: |
|
696 | 695 | ui.status( |
|
697 | 696 | _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]]) |
|
698 | 697 | ) |
|
699 | 698 | if not tr: |
|
700 | 699 | # When not using single transaction, store state after each |
|
701 | 700 | # commit is completely done. On InterventionRequired, we thus |
|
702 | 701 | # won't store the status. Instead, we'll hit the "len(parents) == 2" |
|
703 | 702 | # case and realize that the commit was in progress. |
|
704 | 703 | self.storestatus() |
|
705 | 704 | |
|
706 | 705 | def _finishrebase(self): |
|
707 | 706 | repo, ui, opts = self.repo, self.ui, self.opts |
|
708 | 707 | fm = ui.formatter(b'rebase', opts) |
|
709 | 708 | fm.startitem() |
|
710 | 709 | if self.collapsef: |
|
711 | 710 | p1, p2, _base = defineparents( |
|
712 | 711 | repo, |
|
713 | 712 | min(self.state), |
|
714 | 713 | self.destmap, |
|
715 | 714 | self.state, |
|
716 | 715 | self.skipped, |
|
717 | 716 | self.obsoletenotrebased, |
|
718 | 717 | ) |
|
719 | 718 | editopt = opts.get(b'edit') |
|
720 | 719 | editform = b'rebase.collapse' |
|
721 | 720 | if self.collapsemsg: |
|
722 | 721 | commitmsg = self.collapsemsg |
|
723 | 722 | else: |
|
724 | 723 | commitmsg = b'Collapsed revision' |
|
725 | 724 | for rebased in sorted(self.state): |
|
726 | 725 | if rebased not in self.skipped: |
|
727 | 726 | commitmsg += b'\n* %s' % repo[rebased].description() |
|
728 | 727 | editopt = True |
|
729 | 728 | editor = cmdutil.getcommiteditor(edit=editopt, editform=editform) |
|
730 | 729 | revtoreuse = max(self.state) |
|
731 | 730 | |
|
732 | 731 | self.wctx.setparents(repo[p1].node(), repo[self.external].node()) |
|
733 | 732 | newnode = self._concludenode( |
|
734 | 733 | revtoreuse, editor, commitmsg=commitmsg |
|
735 | 734 | ) |
|
736 | 735 | |
|
737 | 736 | if newnode is not None: |
|
738 | 737 | newrev = repo[newnode].rev() |
|
739 | 738 | for oldrev in self.state: |
|
740 | 739 | self.state[oldrev] = newrev |
|
741 | 740 | |
|
742 | 741 | if b'qtip' in repo.tags(): |
|
743 | 742 | updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts)) |
|
744 | 743 | |
|
745 | 744 | # restore original working directory |
|
746 | 745 | # (we do this before stripping) |
|
747 | 746 | newwd = self.state.get(self.originalwd, self.originalwd) |
|
748 | 747 | if newwd < 0: |
|
749 | 748 | # original directory is a parent of rebase set root or ignored |
|
750 | 749 | newwd = self.originalwd |
|
751 | 750 | if newwd not in [c.rev() for c in repo[None].parents()]: |
|
752 | 751 | ui.note(_(b"update back to initial working directory parent\n")) |
|
753 |
|
|
|
752 | mergemod.update(repo[newwd]) | |
|
754 | 753 | |
|
755 | 754 | collapsedas = None |
|
756 | 755 | if self.collapsef and not self.keepf: |
|
757 | 756 | collapsedas = newnode |
|
758 | 757 | clearrebased( |
|
759 | 758 | ui, |
|
760 | 759 | repo, |
|
761 | 760 | self.destmap, |
|
762 | 761 | self.state, |
|
763 | 762 | self.skipped, |
|
764 | 763 | collapsedas, |
|
765 | 764 | self.keepf, |
|
766 | 765 | fm=fm, |
|
767 | 766 | backup=self.backupf, |
|
768 | 767 | ) |
|
769 | 768 | |
|
770 | 769 | clearstatus(repo) |
|
771 | 770 | clearcollapsemsg(repo) |
|
772 | 771 | |
|
773 | 772 | ui.note(_(b"rebase completed\n")) |
|
774 | 773 | util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True) |
|
775 | 774 | if self.skipped: |
|
776 | 775 | skippedlen = len(self.skipped) |
|
777 | 776 | ui.note(_(b"%d revisions have been skipped\n") % skippedlen) |
|
778 | 777 | fm.end() |
|
779 | 778 | |
|
780 | 779 | if ( |
|
781 | 780 | self.activebookmark |
|
782 | 781 | and self.activebookmark in repo._bookmarks |
|
783 | 782 | and repo[b'.'].node() == repo._bookmarks[self.activebookmark] |
|
784 | 783 | ): |
|
785 | 784 | bookmarks.activate(repo, self.activebookmark) |
|
786 | 785 | |
|
787 | 786 | def _abort(self, backup=True, suppwarns=False, dryrun=False, confirm=False): |
|
788 | 787 | '''Restore the repository to its original state.''' |
|
789 | 788 | |
|
790 | 789 | repo = self.repo |
|
791 | 790 | try: |
|
792 | 791 | # If the first commits in the rebased set get skipped during the |
|
793 | 792 | # rebase, their values within the state mapping will be the dest |
|
794 | 793 | # rev id. The rebased list must must not contain the dest rev |
|
795 | 794 | # (issue4896) |
|
796 | 795 | rebased = [ |
|
797 | 796 | s |
|
798 | 797 | for r, s in self.state.items() |
|
799 | 798 | if s >= 0 and s != r and s != self.destmap[r] |
|
800 | 799 | ] |
|
801 | 800 | immutable = [d for d in rebased if not repo[d].mutable()] |
|
802 | 801 | cleanup = True |
|
803 | 802 | if immutable: |
|
804 | 803 | repo.ui.warn( |
|
805 | 804 | _(b"warning: can't clean up public changesets %s\n") |
|
806 | 805 | % b', '.join(bytes(repo[r]) for r in immutable), |
|
807 | 806 | hint=_(b"see 'hg help phases' for details"), |
|
808 | 807 | ) |
|
809 | 808 | cleanup = False |
|
810 | 809 | |
|
811 | 810 | descendants = set() |
|
812 | 811 | if rebased: |
|
813 | 812 | descendants = set(repo.changelog.descendants(rebased)) |
|
814 | 813 | if descendants - set(rebased): |
|
815 | 814 | repo.ui.warn( |
|
816 | 815 | _( |
|
817 | 816 | b"warning: new changesets detected on " |
|
818 | 817 | b"destination branch, can't strip\n" |
|
819 | 818 | ) |
|
820 | 819 | ) |
|
821 | 820 | cleanup = False |
|
822 | 821 | |
|
823 | 822 | if cleanup: |
|
824 | 823 | if rebased: |
|
825 | 824 | strippoints = [ |
|
826 | 825 | c.node() for c in repo.set(b'roots(%ld)', rebased) |
|
827 | 826 | ] |
|
828 | 827 | |
|
829 | 828 | updateifonnodes = set(rebased) |
|
830 | 829 | updateifonnodes.update(self.destmap.values()) |
|
831 | 830 | |
|
832 | 831 | if not dryrun and not confirm: |
|
833 | 832 | updateifonnodes.add(self.originalwd) |
|
834 | 833 | |
|
835 | 834 | shouldupdate = repo[b'.'].rev() in updateifonnodes |
|
836 | 835 | |
|
837 | 836 | # Update away from the rebase if necessary |
|
838 | 837 | if shouldupdate: |
|
839 | 838 | mergemod.clean_update(repo[self.originalwd]) |
|
840 | 839 | |
|
841 | 840 | # Strip from the first rebased revision |
|
842 | 841 | if rebased: |
|
843 | 842 | repair.strip(repo.ui, repo, strippoints, backup=backup) |
|
844 | 843 | |
|
845 | 844 | if self.activebookmark and self.activebookmark in repo._bookmarks: |
|
846 | 845 | bookmarks.activate(repo, self.activebookmark) |
|
847 | 846 | |
|
848 | 847 | finally: |
|
849 | 848 | clearstatus(repo) |
|
850 | 849 | clearcollapsemsg(repo) |
|
851 | 850 | if not suppwarns: |
|
852 | 851 | repo.ui.warn(_(b'rebase aborted\n')) |
|
853 | 852 | return 0 |
|
854 | 853 | |
|
855 | 854 | |
|
856 | 855 | @command( |
|
857 | 856 | b'rebase', |
|
858 | 857 | [ |
|
859 | 858 | ( |
|
860 | 859 | b's', |
|
861 | 860 | b'source', |
|
862 | 861 | [], |
|
863 | 862 | _(b'rebase the specified changesets and their descendants'), |
|
864 | 863 | _(b'REV'), |
|
865 | 864 | ), |
|
866 | 865 | ( |
|
867 | 866 | b'b', |
|
868 | 867 | b'base', |
|
869 | 868 | [], |
|
870 | 869 | _(b'rebase everything from branching point of specified changeset'), |
|
871 | 870 | _(b'REV'), |
|
872 | 871 | ), |
|
873 | 872 | (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')), |
|
874 | 873 | ( |
|
875 | 874 | b'd', |
|
876 | 875 | b'dest', |
|
877 | 876 | b'', |
|
878 | 877 | _(b'rebase onto the specified changeset'), |
|
879 | 878 | _(b'REV'), |
|
880 | 879 | ), |
|
881 | 880 | (b'', b'collapse', False, _(b'collapse the rebased changesets')), |
|
882 | 881 | ( |
|
883 | 882 | b'm', |
|
884 | 883 | b'message', |
|
885 | 884 | b'', |
|
886 | 885 | _(b'use text as collapse commit message'), |
|
887 | 886 | _(b'TEXT'), |
|
888 | 887 | ), |
|
889 | 888 | (b'e', b'edit', False, _(b'invoke editor on commit messages')), |
|
890 | 889 | ( |
|
891 | 890 | b'l', |
|
892 | 891 | b'logfile', |
|
893 | 892 | b'', |
|
894 | 893 | _(b'read collapse commit message from file'), |
|
895 | 894 | _(b'FILE'), |
|
896 | 895 | ), |
|
897 | 896 | (b'k', b'keep', False, _(b'keep original changesets')), |
|
898 | 897 | (b'', b'keepbranches', False, _(b'keep original branch names')), |
|
899 | 898 | (b'D', b'detach', False, _(b'(DEPRECATED)')), |
|
900 | 899 | (b'i', b'interactive', False, _(b'(DEPRECATED)')), |
|
901 | 900 | (b't', b'tool', b'', _(b'specify merge tool')), |
|
902 | 901 | (b'', b'stop', False, _(b'stop interrupted rebase')), |
|
903 | 902 | (b'c', b'continue', False, _(b'continue an interrupted rebase')), |
|
904 | 903 | (b'a', b'abort', False, _(b'abort an interrupted rebase')), |
|
905 | 904 | ( |
|
906 | 905 | b'', |
|
907 | 906 | b'auto-orphans', |
|
908 | 907 | b'', |
|
909 | 908 | _( |
|
910 | 909 | b'automatically rebase orphan revisions ' |
|
911 | 910 | b'in the specified revset (EXPERIMENTAL)' |
|
912 | 911 | ), |
|
913 | 912 | ), |
|
914 | 913 | ] |
|
915 | 914 | + cmdutil.dryrunopts |
|
916 | 915 | + cmdutil.formatteropts |
|
917 | 916 | + cmdutil.confirmopts, |
|
918 | 917 | _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'), |
|
919 | 918 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, |
|
920 | 919 | ) |
|
921 | 920 | def rebase(ui, repo, **opts): |
|
922 | 921 | """move changeset (and descendants) to a different branch |
|
923 | 922 | |
|
924 | 923 | Rebase uses repeated merging to graft changesets from one part of |
|
925 | 924 | history (the source) onto another (the destination). This can be |
|
926 | 925 | useful for linearizing *local* changes relative to a master |
|
927 | 926 | development tree. |
|
928 | 927 | |
|
929 | 928 | Published commits cannot be rebased (see :hg:`help phases`). |
|
930 | 929 | To copy commits, see :hg:`help graft`. |
|
931 | 930 | |
|
932 | 931 | If you don't specify a destination changeset (``-d/--dest``), rebase |
|
933 | 932 | will use the same logic as :hg:`merge` to pick a destination. If |

934 | 933 | the current branch contains exactly one other head, the other head |

935 | 934 | is merged with by default. Otherwise, an explicit revision with |

936 | 935 | which to merge must be provided. (The destination changeset is not |

937 | 936 | modified by rebasing, but new changesets are added as its |

938 | 937 | descendants.) |
|
939 | 938 | |
|
940 | 939 | Here are the ways to select changesets: |
|
941 | 940 | |
|
942 | 941 | 1. Explicitly select them using ``--rev``. |
|
943 | 942 | |
|
944 | 943 | 2. Use ``--source`` to select a root changeset and include all of its |
|
945 | 944 | descendants. |
|
946 | 945 | |
|
947 | 946 | 3. Use ``--base`` to select a changeset; rebase will find ancestors |
|
948 | 947 | and their descendants which are not also ancestors of the destination. |
|
949 | 948 | |
|
950 | 949 | 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``, |
|
951 | 950 | rebase will use ``--base .`` as above (one command per mode is shown below). |
|
952 | 951 | |
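One command per selection mode, matching the list above (revision numbers illustrative)::

    hg rebase -r 42 -d default    (1: explicit revisions)
    hg rebase -s 42 -d default    (2: 42 and its descendants)
    hg rebase -b 42 -d default    (3: branch-point derivation from 42)
    hg rebase -d default          (4: same as --base .)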
|
953 | 952 | If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC`` |
|
954 | 953 | can be used in ``--dest``. Destination would be calculated per source |
|
955 | 954 | revision with ``SRC`` substituted by that single source revision and |
|
956 | 955 | ``ALLSRC`` substituted by all source revisions. |
|
957 | 956 | |
|
958 | 957 | Rebase will destroy original changesets unless you use ``--keep``. |
|
959 | 958 | It will also move your bookmarks (even if you do). |
|
960 | 959 | |
|
961 | 960 | Some changesets may be dropped if they do not contribute changes |
|
962 | 961 | (e.g. merges from the destination branch). |
|
963 | 962 | |
|
964 | 963 | Unlike ``merge``, rebase will do nothing if you are at the branch tip of |
|
965 | 964 | a named branch with two heads. You will need to explicitly specify source |
|
966 | 965 | and/or destination. |
|
967 | 966 | |
|
968 | 967 | If you need to use a tool to automate merge/conflict decisions, you |
|
969 | 968 | can specify one with ``--tool``, see :hg:`help merge-tools`. |
|
970 | 969 | As a caveat: the tool will not be used to mediate when a file was |
|
971 | 970 | deleted; there is no hook presently available for this. |
|
972 | 971 | |
|
973 | 972 | If a rebase is interrupted to manually resolve a conflict, it can be |
|
974 | 973 | continued with --continue/-c, aborted with --abort/-a, or stopped with |
|
975 | 974 | --stop. |
|
976 | 975 | |
|
977 | 976 | .. container:: verbose |
|
978 | 977 | |
|
979 | 978 | Examples: |
|
980 | 979 | |
|
981 | 980 | - move "local changes" (current commit back to branching point) |
|
982 | 981 | to the current branch tip after a pull:: |
|
983 | 982 | |
|
984 | 983 | hg rebase |
|
985 | 984 | |
|
986 | 985 | - move a single changeset to the stable branch:: |
|
987 | 986 | |
|
988 | 987 | hg rebase -r 5f493448 -d stable |
|
989 | 988 | |
|
990 | 989 | - splice a commit and all its descendants onto another part of history:: |
|
991 | 990 | |
|
992 | 991 | hg rebase --source c0c3 --dest 4cf9 |
|
993 | 992 | |
|
994 | 993 | - rebase everything on a branch marked by a bookmark onto the |
|
995 | 994 | default branch:: |
|
996 | 995 | |
|
997 | 996 | hg rebase --base myfeature --dest default |
|
998 | 997 | |
|
999 | 998 | - collapse a sequence of changes into a single commit:: |
|
1000 | 999 | |
|
1001 | 1000 | hg rebase --collapse -r 1520:1525 -d . |
|
1002 | 1001 | |
|
1003 | 1002 | - move a named branch while preserving its name:: |
|
1004 | 1003 | |
|
1005 | 1004 | hg rebase -r "branch(featureX)" -d 1.3 --keepbranches |
|
1006 | 1005 | |
|
1007 | 1006 | - stabilize orphaned changesets so history looks linear:: |
|
1008 | 1007 | |
|
1009 | 1008 | hg rebase -r 'orphan()-obsolete()'\ |
|
1010 | 1009 | -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\ |
|
1011 | 1010 | max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))' |
|
1012 | 1011 | |
|
1013 | 1012 | Configuration Options: |
|
1014 | 1013 | |
|
1015 | 1014 | You can make rebase require a destination if you set the following config |
|
1016 | 1015 | option:: |
|
1017 | 1016 | |
|
1018 | 1017 | [commands] |
|
1019 | 1018 | rebase.requiredest = True |
|
1020 | 1019 | |
|
1021 | 1020 | By default, rebase will close the transaction after each commit. For |
|
1022 | 1021 | performance purposes, you can configure rebase to use a single transaction |
|
1023 | 1022 | across the entire rebase. WARNING: This setting introduces a significant |
|
1024 | 1023 | risk of losing the work you've done in a rebase if the rebase aborts |
|
1025 | 1024 | unexpectedly:: |
|
1026 | 1025 | |
|
1027 | 1026 | [rebase] |
|
1028 | 1027 | singletransaction = True |
|
1029 | 1028 | |
|
1030 | 1029 | By default, rebase writes to the working copy, but you can configure it to |
|
1031 | 1030 | run in-memory for better performance. When the rebase is not moving the |
|
1032 | 1031 | parent(s) of the working copy (AKA the "currently checked out changesets"), |
|
1033 | 1032 | this may also allow it to run even if the working copy is dirty:: |
|
1034 | 1033 | |
|
1035 | 1034 | [rebase] |
|
1036 | 1035 | experimental.inmemory = True |
|
1037 | 1036 | |
|
1038 | 1037 | Return Values: |
|
1039 | 1038 | |
|
1040 | 1039 | Returns 0 on success, 1 if nothing to rebase or there are |
|
1041 | 1040 | unresolved conflicts. |
|
1042 | 1041 | |
|
1043 | 1042 | """ |
|
1044 | 1043 | opts = pycompat.byteskwargs(opts) |
|
1045 | 1044 | inmemory = ui.configbool(b'rebase', b'experimental.inmemory') |
|
1046 | 1045 | action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue') |
|
1047 | 1046 | if action: |
|
1048 | 1047 | cmdutil.check_incompatible_arguments( |
|
1049 | 1048 | opts, action, [b'confirm', b'dry_run'] |
|
1050 | 1049 | ) |
|
1051 | 1050 | cmdutil.check_incompatible_arguments( |
|
1052 | 1051 | opts, action, [b'rev', b'source', b'base', b'dest'] |
|
1053 | 1052 | ) |
|
1054 | 1053 | cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run') |
|
1055 | 1054 | cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base') |
|
1056 | 1055 | |
|
1057 | 1056 | if action or repo.currenttransaction() is not None: |
|
1058 | 1057 | # in-memory rebase is not compatible with resuming rebases. |
|
1059 | 1058 | # (Or if it is run within a transaction, since the restart logic can |
|
1060 | 1059 | # fail the entire transaction.) |
|
1061 | 1060 | inmemory = False |
|
1062 | 1061 | |
|
1063 | 1062 | if opts.get(b'auto_orphans'): |
|
1064 | 1063 | disallowed_opts = set(opts) - {b'auto_orphans'} |
|
1065 | 1064 | cmdutil.check_incompatible_arguments( |
|
1066 | 1065 | opts, b'auto_orphans', disallowed_opts |
|
1067 | 1066 | ) |
|
1068 | 1067 | |
|
1069 | 1068 | userrevs = list(repo.revs(opts.get(b'auto_orphans'))) |
|
1070 | 1069 | opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)] |
|
1071 | 1070 | opts[b'dest'] = b'_destautoorphanrebase(SRC)' |
|
1072 | 1071 | |
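In other words, ``--auto-orphans`` is shorthand for a per-source destination revset; given the rewriting above, the two forms below are roughly equivalent (``<revset>`` is a placeholder)::

    hg rebase --auto-orphans '<revset>'
    hg rebase -r '<revset> and orphan()' -d '_destautoorphanrebase(SRC)'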
|
1073 | 1072 | if opts.get(b'dry_run') or opts.get(b'confirm'): |
|
1074 | 1073 | return _dryrunrebase(ui, repo, action, opts) |
|
1075 | 1074 | elif action == b'stop': |
|
1076 | 1075 | rbsrt = rebaseruntime(repo, ui) |
|
1077 | 1076 | with repo.wlock(), repo.lock(): |
|
1078 | 1077 | rbsrt.restorestatus() |
|
1079 | 1078 | if rbsrt.collapsef: |
|
1080 | 1079 | raise error.Abort(_(b"cannot stop in --collapse session")) |
|
1081 | 1080 | allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt) |
|
1082 | 1081 | if not (rbsrt.keepf or allowunstable): |
|
1083 | 1082 | raise error.Abort( |
|
1084 | 1083 | _( |
|
1085 | 1084 | b"cannot remove original changesets with" |
|
1086 | 1085 | b" unrebased descendants" |
|
1087 | 1086 | ), |
|
1088 | 1087 | hint=_( |
|
1089 | 1088 | b'either enable obsmarkers to allow unstable ' |
|
1090 | 1089 | b'revisions or use --keep to keep original ' |
|
1091 | 1090 | b'changesets' |
|
1092 | 1091 | ), |
|
1093 | 1092 | ) |
|
1094 | 1093 | # update to the current working revision |
|
1095 | 1094 | # to clear interrupted merge |
|
1096 | 1095 | mergemod.clean_update(repo[rbsrt.originalwd]) |
|
1097 | 1096 | rbsrt._finishrebase() |
|
1098 | 1097 | return 0 |
|
1099 | 1098 | elif inmemory: |
|
1100 | 1099 | try: |
|
1101 | 1100 | # in-memory merge doesn't support conflicts, so if we hit any, abort |
|
1102 | 1101 | # and re-run as an on-disk merge. |
|
1103 | 1102 | overrides = {(b'rebase', b'singletransaction'): True} |
|
1104 | 1103 | with ui.configoverride(overrides, b'rebase'): |
|
1105 | 1104 | return _dorebase(ui, repo, action, opts, inmemory=inmemory) |
|
1106 | 1105 | except error.InMemoryMergeConflictsError: |
|
1107 | 1106 | ui.warn( |
|
1108 | 1107 | _( |
|
1109 | 1108 | b'hit merge conflicts; re-running rebase without in-memory' |
|
1110 | 1109 | b' merge\n' |
|
1111 | 1110 | ) |
|
1112 | 1111 | ) |
|
1113 | 1112 | clearstatus(repo) |
|
1114 | 1113 | clearcollapsemsg(repo) |
|
1115 | 1114 | return _dorebase(ui, repo, action, opts, inmemory=False) |
|
1116 | 1115 | else: |
|
1117 | 1116 | return _dorebase(ui, repo, action, opts) |
|
1118 | 1117 | |
|
1119 | 1118 | |
|
1120 | 1119 | def _dryrunrebase(ui, repo, action, opts): |
|
1121 | 1120 | rbsrt = rebaseruntime(repo, ui, inmemory=True, dryrun=True, opts=opts) |
|
1122 | 1121 | confirm = opts.get(b'confirm') |
|
1123 | 1122 | if confirm: |
|
1124 | 1123 | ui.status(_(b'starting in-memory rebase\n')) |
|
1125 | 1124 | else: |
|
1126 | 1125 | ui.status( |
|
1127 | 1126 | _(b'starting dry-run rebase; repository will not be changed\n') |
|
1128 | 1127 | ) |
|
1129 | 1128 | with repo.wlock(), repo.lock(): |
|
1130 | 1129 | needsabort = True |
|
1131 | 1130 | try: |
|
1132 | 1131 | overrides = {(b'rebase', b'singletransaction'): True} |
|
1133 | 1132 | with ui.configoverride(overrides, b'rebase'): |
|
1134 | 1133 | _origrebase( |
|
1135 | 1134 | ui, repo, action, opts, rbsrt, |
|
1136 | 1135 | ) |
|
1137 | 1136 | except error.ConflictResolutionRequired: |
|
1138 | 1137 | ui.status(_(b'hit a merge conflict\n')) |
|
1139 | 1138 | return 1 |
|
1140 | 1139 | except error.Abort: |
|
1141 | 1140 | needsabort = False |
|
1142 | 1141 | raise |
|
1143 | 1142 | else: |
|
1144 | 1143 | if confirm: |
|
1145 | 1144 | ui.status(_(b'rebase completed successfully\n')) |
|
1146 | 1145 | if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')): |
|
1147 | 1146 | # finish unfinished rebase |
|
1148 | 1147 | rbsrt._finishrebase() |
|
1149 | 1148 | else: |
|
1150 | 1149 | rbsrt._prepareabortorcontinue( |
|
1151 | 1150 | isabort=True, |
|
1152 | 1151 | backup=False, |
|
1153 | 1152 | suppwarns=True, |
|
1154 | 1153 | confirm=confirm, |
|
1155 | 1154 | ) |
|
1156 | 1155 | needsabort = False |
|
1157 | 1156 | else: |
|
1158 | 1157 | ui.status( |
|
1159 | 1158 | _( |
|
1160 | 1159 | b'dry-run rebase completed successfully; run without' |
|
1161 | 1160 | b' -n/--dry-run to perform this rebase\n' |
|
1162 | 1161 | ) |
|
1163 | 1162 | ) |
|
1164 | 1163 | return 0 |
|
1165 | 1164 | finally: |
|
1166 | 1165 | if needsabort: |
|
1167 | 1166 | # no need to store backup in case of dryrun |
|
1168 | 1167 | rbsrt._prepareabortorcontinue( |
|
1169 | 1168 | isabort=True, |
|
1170 | 1169 | backup=False, |
|
1171 | 1170 | suppwarns=True, |
|
1172 | 1171 | dryrun=opts.get(b'dry_run'), |
|
1173 | 1172 | ) |
|
1174 | 1173 | |
|
1175 | 1174 | |
|
1176 | 1175 | def _dorebase(ui, repo, action, opts, inmemory=False): |
|
1177 | 1176 | rbsrt = rebaseruntime(repo, ui, inmemory, opts=opts) |
|
1178 | 1177 | return _origrebase(ui, repo, action, opts, rbsrt) |
|
1179 | 1178 | |
|
1180 | 1179 | |
|
1181 | 1180 | def _origrebase(ui, repo, action, opts, rbsrt): |
|
1182 | 1181 | assert action != b'stop' |
|
1183 | 1182 | with repo.wlock(), repo.lock(): |
|
1184 | 1183 | if opts.get(b'interactive'): |
|
1185 | 1184 | try: |
|
1186 | 1185 | if extensions.find(b'histedit'): |
|
1187 | 1186 | enablehistedit = b'' |
|
1188 | 1187 | except KeyError: |
|
1189 | 1188 | enablehistedit = b" --config extensions.histedit=" |
|
1190 | 1189 | help = b"hg%s help -e histedit" % enablehistedit |
|
1191 | 1190 | msg = ( |
|
1192 | 1191 | _( |
|
1193 | 1192 | b"interactive history editing is supported by the " |
|
1194 | 1193 | b"'histedit' extension (see \"%s\")" |
|
1195 | 1194 | ) |
|
1196 | 1195 | % help |
|
1197 | 1196 | ) |
|
1198 | 1197 | raise error.Abort(msg) |
|
1199 | 1198 | |
|
1200 | 1199 | if rbsrt.collapsemsg and not rbsrt.collapsef: |
|
1201 | 1200 | raise error.Abort(_(b'message can only be specified with collapse')) |
|
1202 | 1201 | |
|
1203 | 1202 | if action: |
|
1204 | 1203 | if rbsrt.collapsef: |
|
1205 | 1204 | raise error.Abort( |
|
1206 | 1205 | _(b'cannot use collapse with continue or abort') |
|
1207 | 1206 | ) |
|
1208 | 1207 | if action == b'abort' and opts.get(b'tool', False): |
|
1209 | 1208 | ui.warn(_(b'tool option will be ignored\n')) |
|
1210 | 1209 | if action == b'continue': |
|
1211 | 1210 | ms = mergestatemod.mergestate.read(repo) |
|
1212 | 1211 | mergeutil.checkunresolved(ms) |
|
1213 | 1212 | |
|
1214 | 1213 | retcode = rbsrt._prepareabortorcontinue( |
|
1215 | 1214 | isabort=(action == b'abort') |
|
1216 | 1215 | ) |
|
1217 | 1216 | if retcode is not None: |
|
1218 | 1217 | return retcode |
|
1219 | 1218 | else: |
|
1220 | 1219 | # search default destination in this space |
|
1221 | 1220 | # used in the 'hg pull --rebase' case, see issue 5214. |
|
1222 | 1221 | destspace = opts.get(b'_destspace') |
|
1223 | 1222 | destmap = _definedestmap( |
|
1224 | 1223 | ui, |
|
1225 | 1224 | repo, |
|
1226 | 1225 | rbsrt.inmemory, |
|
1227 | 1226 | opts.get(b'dest', None), |
|
1228 | 1227 | opts.get(b'source', []), |
|
1229 | 1228 | opts.get(b'base', []), |
|
1230 | 1229 | opts.get(b'rev', []), |
|
1231 | 1230 | destspace=destspace, |
|
1232 | 1231 | ) |
|
1233 | 1232 | retcode = rbsrt._preparenewrebase(destmap) |
|
1234 | 1233 | if retcode is not None: |
|
1235 | 1234 | return retcode |
|
1236 | 1235 | storecollapsemsg(repo, rbsrt.collapsemsg) |
|
1237 | 1236 | |
|
1238 | 1237 | tr = None |
|
1239 | 1238 | |
|
1240 | 1239 | singletr = ui.configbool(b'rebase', b'singletransaction') |
|
1241 | 1240 | if singletr: |
|
1242 | 1241 | tr = repo.transaction(b'rebase') |
|
1243 | 1242 | |
|
1244 | 1243 | # If `rebase.singletransaction` is enabled, wrap the entire operation in |
|
1245 | 1244 | # one transaction here. Otherwise, transactions are obtained when |
|
1246 | 1245 | # committing each node, which is slower but allows partial success. |
|
1247 | 1246 | with util.acceptintervention(tr): |
|
1248 | 1247 | # Same logic for the dirstate guard, except we don't create one when |
|
1249 | 1248 | # rebasing in-memory (it's not needed). |
|
1250 | 1249 | dsguard = None |
|
1251 | 1250 | if singletr and not rbsrt.inmemory: |
|
1252 | 1251 | dsguard = dirstateguard.dirstateguard(repo, b'rebase') |
|
1253 | 1252 | with util.acceptintervention(dsguard): |
|
1254 | 1253 | rbsrt._performrebase(tr) |
|
1255 | 1254 | if not rbsrt.dryrun: |
|
1256 | 1255 | rbsrt._finishrebase() |
|
1257 | 1256 | |
|
1258 | 1257 | |
|
1259 | 1258 | def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace): |
|
1260 | 1259 | """use revisions argument to define destmap {srcrev: destrev}""" |
|
1261 | 1260 | if revf is None: |
|
1262 | 1261 | revf = [] |
|
1263 | 1262 | |
|
1264 | 1263 | # destspace is here to work around issues with `hg pull --rebase` see |
|
1265 | 1264 | # issue5214 for details |
|
1266 | 1265 | |
|
1267 | 1266 | cmdutil.checkunfinished(repo) |
|
1268 | 1267 | if not inmemory: |
|
1269 | 1268 | cmdutil.bailifchanged(repo) |
|
1270 | 1269 | |
|
1271 | 1270 | if ui.configbool(b'commands', b'rebase.requiredest') and not destf: |
|
1272 | 1271 | raise error.Abort( |
|
1273 | 1272 | _(b'you must specify a destination'), |
|
1274 | 1273 | hint=_(b'use: hg rebase -d REV'), |
|
1275 | 1274 | ) |
|
1276 | 1275 | |
|
1277 | 1276 | dest = None |
|
1278 | 1277 | |
|
1279 | 1278 | if revf: |
|
1280 | 1279 | rebaseset = scmutil.revrange(repo, revf) |
|
1281 | 1280 | if not rebaseset: |
|
1282 | 1281 | ui.status(_(b'empty "rev" revision set - nothing to rebase\n')) |
|
1283 | 1282 | return None |
|
1284 | 1283 | elif srcf: |
|
1285 | 1284 | src = scmutil.revrange(repo, srcf) |
|
1286 | 1285 | if not src: |
|
1287 | 1286 | ui.status(_(b'empty "source" revision set - nothing to rebase\n')) |
|
1288 | 1287 | return None |
|
1289 | 1288 | # `+ (%ld)` to work around `wdir()::` being empty |
|
1290 | 1289 | rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src) |
|
1291 | 1290 | else: |
|
1292 | 1291 | base = scmutil.revrange(repo, basef or [b'.']) |
|
1293 | 1292 | if not base: |
|
1294 | 1293 | ui.status( |
|
1295 | 1294 | _(b'empty "base" revision set - ' b"can't compute rebase set\n") |
|
1296 | 1295 | ) |
|
1297 | 1296 | return None |
|
1298 | 1297 | if destf: |
|
1299 | 1298 | # --base does not support multiple destinations |
|
1300 | 1299 | dest = scmutil.revsingle(repo, destf) |
|
1301 | 1300 | else: |
|
1302 | 1301 | dest = repo[_destrebase(repo, base, destspace=destspace)] |
|
1303 | 1302 | destf = bytes(dest) |
|
1304 | 1303 | |
|
1305 | 1304 | roots = [] # selected children of branching points |
|
1306 | 1305 | bpbase = {} # {branchingpoint: [origbase]} |
|
1307 | 1306 | for b in base: # group bases by branching points |
|
1308 | 1307 | bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first() |
|
1309 | 1308 | bpbase[bp] = bpbase.get(bp, []) + [b] |
|
1310 | 1309 | if None in bpbase: |
|
311 | 310 | # emulate the old behavior, showing "nothing to rebase" (a better |

312 | 311 | # behavior might be to abort with a "cannot find branching point" error) |
|
1313 | 1312 | bpbase.clear() |
|
1314 | 1313 | for bp, bs in pycompat.iteritems(bpbase): # calculate roots |
|
1315 | 1314 | roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs)) |
|
1316 | 1315 | |
|
1317 | 1316 | rebaseset = repo.revs(b'%ld::', roots) |
|
1318 | 1317 | |
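A small worked example of the derivation above: with linear history ``0-1-2-3`` and ``4`` branched off ``1``, ``hg rebase --base 3 --dest 4`` finds branching point ``ancestor(3, 4) = 1``, selects root ``children(1) & ancestors(3) = 2``, and rebases ``2::``, i.e. revisions 2 and 3::

    o  4  (dest)
    |
    | o  3  (base)
    | |
    | o  2  (selected root)
    |/
    o  1  (branching point)
    |
    o  0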
|
1319 | 1318 | if not rebaseset: |
|
1320 | 1319 | # transform to list because smartsets are not comparable to |
|
1321 | 1320 | # lists. This should be improved to honor laziness of |
|
1322 | 1321 | # smartset. |
|
1323 | 1322 | if list(base) == [dest.rev()]: |
|
1324 | 1323 | if basef: |
|
1325 | 1324 | ui.status( |
|
1326 | 1325 | _( |
|
1327 | 1326 | b'nothing to rebase - %s is both "base"' |
|
1328 | 1327 | b' and destination\n' |
|
1329 | 1328 | ) |
|
1330 | 1329 | % dest |
|
1331 | 1330 | ) |
|
1332 | 1331 | else: |
|
1333 | 1332 | ui.status( |
|
1334 | 1333 | _( |
|
1335 | 1334 | b'nothing to rebase - working directory ' |
|
1336 | 1335 | b'parent is also destination\n' |
|
1337 | 1336 | ) |
|
1338 | 1337 | ) |
|
1339 | 1338 | elif not repo.revs(b'%ld - ::%d', base, dest.rev()): |
|
1340 | 1339 | if basef: |
|
1341 | 1340 | ui.status( |
|
1342 | 1341 | _( |
|
1343 | 1342 | b'nothing to rebase - "base" %s is ' |
|
1344 | 1343 | b'already an ancestor of destination ' |
|
1345 | 1344 | b'%s\n' |
|
1346 | 1345 | ) |
|
1347 | 1346 | % (b'+'.join(bytes(repo[r]) for r in base), dest) |
|
1348 | 1347 | ) |
|
1349 | 1348 | else: |
|
1350 | 1349 | ui.status( |
|
1351 | 1350 | _( |
|
1352 | 1351 | b'nothing to rebase - working ' |
|
1353 | 1352 | b'directory parent is already an ' |
|
1354 | 1353 | b'ancestor of destination %s\n' |
|
1355 | 1354 | ) |
|
1356 | 1355 | % dest |
|
1357 | 1356 | ) |
|
1358 | 1357 | else: # can it happen? |
|
1359 | 1358 | ui.status( |
|
1360 | 1359 | _(b'nothing to rebase from %s to %s\n') |
|
1361 | 1360 | % (b'+'.join(bytes(repo[r]) for r in base), dest) |
|
1362 | 1361 | ) |
|
1363 | 1362 | return None |
|
1364 | 1363 | |
|
1365 | 1364 | if nodemod.wdirrev in rebaseset: |
|
1366 | 1365 | raise error.Abort(_(b'cannot rebase the working copy')) |
|
1367 | 1366 | rebasingwcp = repo[b'.'].rev() in rebaseset |
|
1368 | 1367 | ui.log( |
|
1369 | 1368 | b"rebase", |
|
1370 | 1369 | b"rebasing working copy parent: %r\n", |
|
1371 | 1370 | rebasingwcp, |
|
1372 | 1371 | rebase_rebasing_wcp=rebasingwcp, |
|
1373 | 1372 | ) |
|
1374 | 1373 | if inmemory and rebasingwcp: |
|
1375 | 1374 | # Check these since we did not before. |
|
1376 | 1375 | cmdutil.checkunfinished(repo) |
|
1377 | 1376 | cmdutil.bailifchanged(repo) |
|
1378 | 1377 | |
|
1379 | 1378 | if not destf: |
|
1380 | 1379 | dest = repo[_destrebase(repo, rebaseset, destspace=destspace)] |
|
1381 | 1380 | destf = bytes(dest) |
|
1382 | 1381 | |
|
1383 | 1382 | allsrc = revsetlang.formatspec(b'%ld', rebaseset) |
|
1384 | 1383 | alias = {b'ALLSRC': allsrc} |
|
1385 | 1384 | |
|
1386 | 1385 | if dest is None: |
|
1387 | 1386 | try: |
|
1388 | 1387 | # fast path: try to resolve dest without SRC alias |
|
1389 | 1388 | dest = scmutil.revsingle(repo, destf, localalias=alias) |
|
1390 | 1389 | except error.RepoLookupError: |
|
1391 | 1390 | # multi-dest path: resolve dest for each SRC separately |
|
1392 | 1391 | destmap = {} |
|
1393 | 1392 | for r in rebaseset: |
|
1394 | 1393 | alias[b'SRC'] = revsetlang.formatspec(b'%d', r) |
|
1395 | 1394 | # use repo.anyrevs instead of scmutil.revsingle because we |
|
1396 | 1395 | # don't want to abort if destset is empty. |
|
1397 | 1396 | destset = repo.anyrevs([destf], user=True, localalias=alias) |
|
1398 | 1397 | size = len(destset) |
|
1399 | 1398 | if size == 1: |
|
1400 | 1399 | destmap[r] = destset.first() |
|
1401 | 1400 | elif size == 0: |
|
1402 | 1401 | ui.note(_(b'skipping %s - empty destination\n') % repo[r]) |
|
1403 | 1402 | else: |
|
1404 | 1403 | raise error.Abort( |
|
1405 | 1404 | _(b'rebase destination for %s is not unique') % repo[r] |
|
1406 | 1405 | ) |
|
1407 | 1406 | |
|
1408 | 1407 | if dest is not None: |
|
1409 | 1408 | # single-dest case: assign dest to each rev in rebaseset |
|
1410 | 1409 | destrev = dest.rev() |
|
1411 | 1410 | destmap = {r: destrev for r in rebaseset} # {srcrev: destrev} |
|
1412 | 1411 | |
|
1413 | 1412 | if not destmap: |
|
1414 | 1413 | ui.status(_(b'nothing to rebase - empty destination\n')) |
|
1415 | 1414 | return None |
|
1416 | 1415 | |
|
1417 | 1416 | return destmap |
|
1418 | 1417 | |
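# Illustrative sketch of the returned mapping (revision numbers are
# hypothetical): for a single destination such as ``hg rebase -s 10 -d 25``,
# every revision in ``10::`` maps to the same destination, e.g.
#
#   destmap == {10: 25, 11: 25, 12: 25}
#
# while a destination revset using the ``SRC`` alias may resolve to a
# different destination per source revision (the multi-dest path above).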
|
1419 | 1418 | |
|
1420 | 1419 | def externalparent(repo, state, destancestors): |
|
1421 | 1420 | """Return the revision that should be used as the second parent |
|
1422 | 1421 | when the revisions in state are collapsed on top of destancestors.
|
1423 | 1422 | Abort if there is more than one parent. |
|
1424 | 1423 | """ |
|
1425 | 1424 | parents = set() |
|
1426 | 1425 | source = min(state) |
|
1427 | 1426 | for rev in state: |
|
1428 | 1427 | if rev == source: |
|
1429 | 1428 | continue |
|
1430 | 1429 | for p in repo[rev].parents(): |
|
1431 | 1430 | if p.rev() not in state and p.rev() not in destancestors: |
|
1432 | 1431 | parents.add(p.rev()) |
|
1433 | 1432 | if not parents: |
|
1434 | 1433 | return nullrev |
|
1435 | 1434 | if len(parents) == 1: |
|
1436 | 1435 | return parents.pop() |
|
1437 | 1436 | raise error.Abort( |
|
1438 | 1437 | _( |
|
1439 | 1438 | b'unable to collapse on top of %d, there is more ' |
|
1440 | 1439 | b'than one external parent: %s' |
|
1441 | 1440 | ) |
|
1442 | 1441 | % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents))) |
|
1443 | 1442 | ) |
|
1444 | 1443 | |
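# Sketch with hypothetical revisions: when collapsing state == {4, 5} where
# 5 is a merge of 4 and 9, and 9 is neither in state nor an ancestor of the
# destination, externalparent() returns 9; had there been two such outside
# parents, the Abort above would fire.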
|
1445 | 1444 | |
|
1446 | 1445 | def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg): |
|
1447 | 1446 | '''Commit the memory changes with parents p1 and p2. |
|
1448 | 1447 | Return node of committed revision.''' |
|
1449 | 1448 | # By convention, ``extra['branch']`` (set by extrafn) clobbers |
|
1450 | 1449 | # ``branch`` (used when passing ``--keepbranches``). |
|
1451 | 1450 | branch = None |
|
1452 | 1451 | if b'branch' in extra: |
|
1453 | 1452 | branch = extra[b'branch'] |
|
1454 | 1453 | |
|
1455 | 1454 | # FIXME: We call _compact() because it's required to correctly detect |
|
1456 | 1455 | # changed files. This was added to fix a regression shortly before the 5.5 |
|
1457 | 1456 | # release. A proper fix will be done in the default branch. |
|
1458 | 1457 | wctx._compact() |
|
1459 | 1458 | memctx = wctx.tomemctx( |
|
1460 | 1459 | commitmsg, |
|
1461 | 1460 | date=date, |
|
1462 | 1461 | extra=extra, |
|
1463 | 1462 | user=user, |
|
1464 | 1463 | branch=branch, |
|
1465 | 1464 | editor=editor, |
|
1466 | 1465 | ) |
|
1467 | 1466 | if memctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'): |
|
1468 | 1467 | return None |
|
1469 | 1468 | commitres = repo.commitctx(memctx) |
|
1470 | 1469 | wctx.clean() # Might be reused |
|
1471 | 1470 | return commitres |
|
1472 | 1471 | |
|
1473 | 1472 | |
|
1474 | 1473 | def commitnode(repo, editor, extra, user, date, commitmsg): |
|
1475 | 1474 | '''Commit the wd changes with parents p1 and p2. |
|
1476 | 1475 | Return node of committed revision.''' |
|
1477 | 1476 | dsguard = util.nullcontextmanager() |
|
1478 | 1477 | if not repo.ui.configbool(b'rebase', b'singletransaction'): |
|
1479 | 1478 | dsguard = dirstateguard.dirstateguard(repo, b'rebase') |
|
1480 | 1479 | with dsguard: |
|
1481 | 1480 | # Commit might fail if unresolved files exist |
|
1482 | 1481 | newnode = repo.commit( |
|
1483 | 1482 | text=commitmsg, user=user, date=date, extra=extra, editor=editor |
|
1484 | 1483 | ) |
|
1485 | 1484 | |
|
1486 | 1485 | repo.dirstate.setbranch(repo[newnode].branch()) |
|
1487 | 1486 | return newnode |
|
1488 | 1487 | |
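# For reference, the dirstateguard above is skipped when the user opts into
# one transaction for the whole rebase via (illustrative hgrc):
#
#   [rebase]
#   singletransaction = True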
|
1489 | 1488 | |
|
1490 | 1489 | def rebasenode(repo, rev, p1, p2, base, collapse, wctx): |
|
1491 | 1490 | """Rebase a single revision rev on top of p1 using base as merge ancestor""" |
|
1492 | 1491 | # Merge phase |
|
1493 | 1492 | # Update to destination and merge it with local |
|
1494 | 1493 | p1ctx = repo[p1] |
|
1495 | 1494 | if wctx.isinmemory(): |
|
1496 | 1495 | wctx.setbase(p1ctx) |
|
1497 | 1496 | else: |
|
1498 | 1497 | if repo[b'.'].rev() != p1: |
|
1499 | 1498 | repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx)) |
|
1500 | 1499 | mergemod.clean_update(p1ctx) |
|
1501 | 1500 | else: |
|
1502 | 1501 | repo.ui.debug(b" already in destination\n") |
|
1503 | 1502 | # This is, alas, necessary to invalidate workingctx's manifest cache, |
|
1504 | 1503 | # as well as other data we litter on it in other places. |
|
1505 | 1504 | wctx = repo[None] |
|
1506 | 1505 | repo.dirstate.write(repo.currenttransaction()) |
|
1507 | 1506 | ctx = repo[rev] |
|
1508 | 1507 | repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx)) |
|
1509 | 1508 | if base is not None: |
|
1510 | 1509 | repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base])) |
|
1511 | 1510 | |
|
1512 | 1511 | # See explanation in merge.graft() |
|
1513 | 1512 | mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node()) |
|
1514 | 1513 | stats = mergemod._update( |
|
1515 | 1514 | repo, |
|
1516 | 1515 | rev, |
|
1517 | 1516 | branchmerge=True, |
|
1518 | 1517 | force=True, |
|
1519 | 1518 | ancestor=base, |
|
1520 | 1519 | mergeancestor=mergeancestor, |
|
1521 | 1520 | labels=[b'dest', b'source'], |
|
1522 | 1521 | wc=wctx, |
|
1523 | 1522 | ) |
|
1524 | 1523 | wctx.setparents(p1ctx.node(), repo[p2].node()) |
|
1525 | 1524 | if collapse: |
|
1526 | 1525 | copies.graftcopies(wctx, ctx, p1ctx) |
|
1527 | 1526 | else: |
|
1528 | 1527 | # If we're not using --collapse, we need to |
|
1529 | 1528 | # duplicate copies between the revision we're |
|
1530 | 1529 | # rebasing and its first parent. |
|
1531 | 1530 | copies.graftcopies(wctx, ctx, ctx.p1()) |
|
1532 | 1531 | |
|
1533 | 1532 | if stats.unresolvedcount > 0: |
|
1534 | 1533 | if wctx.isinmemory(): |
|
1535 | 1534 | raise error.InMemoryMergeConflictsError() |
|
1536 | 1535 | else: |
|
1537 | 1536 | raise error.ConflictResolutionRequired(b'rebase') |
|
1538 | 1537 | |
|
1539 | 1538 | |
|
1540 | 1539 | def adjustdest(repo, rev, destmap, state, skipped): |
|
1541 | 1540 | r"""adjust rebase destination given the current rebase state |
|
1542 | 1541 | |
|
1543 | 1542 | rev is what is being rebased. Return a list of two revs, which are the |
|
1544 | 1543 | adjusted destinations for rev's p1 and p2, respectively. If a parent is |
|
1545 | 1544 | nullrev, return dest without adjustment for it. |
|
1546 | 1545 | |
|
1547 | 1546 | For example, when rebasing B+E to F and C to G, rebase will first move B
|
1548 | 1547 | to B1, and E's destination will be adjusted from F to B1. |
|
1549 | 1548 | |
|
1550 | 1549 | B1 <- written during rebasing B |
|
1551 | 1550 | | |
|
1552 | 1551 | F <- original destination of B, E |
|
1553 | 1552 | | |
|
1554 | 1553 | | E <- rev, which is being rebased |
|
1555 | 1554 | | | |
|
1556 | 1555 | | D <- prev, one parent of rev being checked |
|
1557 | 1556 | | | |
|
1558 | 1557 | | x <- skipped, ex. no successor or successor in (::dest) |
|
1559 | 1558 | | | |
|
1560 | 1559 | | C <- rebased as C', different destination |
|
1561 | 1560 | | | |
|
1562 | 1561 | | B <- rebased as B1 C' |
|
1563 | 1562 | |/ | |
|
1564 | 1563 | A G <- destination of C, different |
|
1565 | 1564 | |
|
1566 | 1565 | Another example involving a merge changeset: with rebase -r C+G+H -d K,

1567 | 1566 | rebase will first move C to C1 and G to G1; when it checks H, the adjusted
|
1568 | 1567 | destinations will be [C1, G1]. |
|
1569 | 1568 | |
|
1570 | 1569 | H C1 G1 |
|
1571 | 1570 | /| | / |
|
1572 | 1571 | F G |/ |
|
1573 | 1572 | K | | -> K |
|
1574 | 1573 | | C D | |
|
1575 | 1574 | | |/ | |
|
1576 | 1575 | | B | ... |
|
1577 | 1576 | |/ |/ |
|
1578 | 1577 | A A |
|
1579 | 1578 | |
|
1580 | 1579 | Besides, adjust dest according to existing rebase information. For example, |
|
1581 | 1580 | |
|
1582 | 1581 | B C D B needs to be rebased on top of C, C needs to be rebased on top |
|
1583 | 1582 | \|/ of D. We will rebase C first. |
|
1584 | 1583 | A |
|
1585 | 1584 | |
|
1586 | 1585 | C' After rebasing C, when considering B's destination, use C' |
|
1587 | 1586 | | instead of the original C. |
|
1588 | 1587 | B D |
|
1589 | 1588 | \ / |
|
1590 | 1589 | A |
|
1591 | 1590 | """ |
|
1592 | 1591 | # pick already rebased revs with same dest from state as interesting source |
|
1593 | 1592 | dest = destmap[rev] |
|
1594 | 1593 | source = [ |
|
1595 | 1594 | s |
|
1596 | 1595 | for s, d in state.items() |
|
1597 | 1596 | if d > 0 and destmap[s] == dest and s not in skipped |
|
1598 | 1597 | ] |
|
1599 | 1598 | |
|
1600 | 1599 | result = [] |
|
1601 | 1600 | for prev in repo.changelog.parentrevs(rev): |
|
1602 | 1601 | adjusted = dest |
|
1603 | 1602 | if prev != nullrev: |
|
1604 | 1603 | candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first() |
|
1605 | 1604 | if candidate is not None: |
|
1606 | 1605 | adjusted = state[candidate] |
|
1607 | 1606 | if adjusted == dest and dest in state: |
|
1608 | 1607 | adjusted = state[dest] |
|
1609 | 1608 | if adjusted == revtodo: |
|
1610 | 1609 | # sortsource should produce an order that makes this impossible |
|
1611 | 1610 | raise error.ProgrammingError( |
|
1612 | 1611 | b'rev %d should be rebased already at this time' % dest |
|
1613 | 1612 | ) |
|
1614 | 1613 | result.append(adjusted) |
|
1615 | 1614 | return result |
|
1616 | 1615 | |
|
1617 | 1616 | |
|
1618 | 1617 | def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped): |
|
1619 | 1618 | """ |
|
1620 | 1619 | Abort if rebase will create divergence or rebase is noop because of markers |
|
1621 | 1620 | |
|
1622 | 1621 | `rebaseobsrevs`: set of obsolete revisions in source
|
1623 | 1622 | `rebaseobsskipped`: set of revisions from source skipped because they have |
|
1624 | 1623 | successors in destination or no non-obsolete successor. |
|
1625 | 1624 | """ |
|
1626 | 1625 | # Obsolete node with successors not in dest leads to divergence |
|
1627 | 1626 | divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence') |
|
1628 | 1627 | divergencebasecandidates = rebaseobsrevs - rebaseobsskipped |
|
1629 | 1628 | |
|
1630 | 1629 | if divergencebasecandidates and not divergenceok: |
|
1631 | 1630 | divhashes = (bytes(repo[r]) for r in divergencebasecandidates) |
|
1632 | 1631 | msg = _(b"this rebase will cause divergences from: %s") |
|
1633 | 1632 | h = _( |
|
1634 | 1633 | b"to force the rebase please set " |
|
1635 | 1634 | b"experimental.evolution.allowdivergence=True" |
|
1636 | 1635 | ) |
|
1637 | 1636 | raise error.Abort(msg % (b",".join(divhashes),), hint=h) |
|
1638 | 1637 | |
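# The escape hatch named in the hint is ordinary config (illustrative):
#
#   [experimental]
#   evolution.allowdivergence = True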
|
1639 | 1638 | |
|
1640 | 1639 | def successorrevs(unfi, rev): |
|
1641 | 1640 | """yield revision numbers for successors of rev""" |
|
1642 | 1641 | assert unfi.filtername is None |
|
1643 | 1642 | get_rev = unfi.changelog.index.get_rev |
|
1644 | 1643 | for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]): |
|
1645 | 1644 | r = get_rev(s) |
|
1646 | 1645 | if r is not None: |
|
1647 | 1646 | yield r |
|
1648 | 1647 | |
|
1649 | 1648 | |
|
1650 | 1649 | def defineparents(repo, rev, destmap, state, skipped, obsskipped): |
|
1651 | 1650 | """Return new parents and optionally a merge base for rev being rebased |
|
1652 | 1651 | |
|
1653 | 1652 | The destination specified by "dest" cannot always be used directly because |
|
1654 | 1653 | a previous rebase result could affect the destination. For example,
|
1655 | 1654 | |
|
1656 | 1655 | D E rebase -r C+D+E -d B |
|
1657 | 1656 | |/ C will be rebased to C' |
|
1658 | 1657 | B C D's new destination will be C' instead of B |
|
1659 | 1658 | |/ E's new destination will be C' instead of B |
|
1660 | 1659 | A |
|
1661 | 1660 | |
|
1662 | 1661 | The new parents of a merge are slightly more complicated. See the comment
|
1663 | 1662 | block below. |
|
1664 | 1663 | """ |
|
1665 | 1664 | # use unfiltered changelog since successorrevs may return filtered nodes |
|
1666 | 1665 | assert repo.filtername is None |
|
1667 | 1666 | cl = repo.changelog |
|
1668 | 1667 | isancestor = cl.isancestorrev |
|
1669 | 1668 | |
|
1670 | 1669 | dest = destmap[rev] |
|
1671 | 1670 | oldps = repo.changelog.parentrevs(rev) # old parents |
|
1672 | 1671 | newps = [nullrev, nullrev] # new parents |
|
1673 | 1672 | dests = adjustdest(repo, rev, destmap, state, skipped) |
|
1674 | 1673 | bases = list(oldps) # merge base candidates, initially just old parents |
|
1675 | 1674 | |
|
1676 | 1675 | if all(r == nullrev for r in oldps[1:]): |
|
1677 | 1676 | # For non-merge changeset, just move p to adjusted dest as requested. |
|
1678 | 1677 | newps[0] = dests[0] |
|
1679 | 1678 | else: |
|
1680 | 1679 | # For merge changeset, if we move p to dests[i] unconditionally, both |
|
1681 | 1680 | # parents may change and the end result looks like "the merge loses a |
|
1682 | 1681 | # parent", which is a surprise. This is a limitation because "--dest" only
|
1683 | 1682 | # accepts one dest per src. |
|
1684 | 1683 | # |
|
1685 | 1684 | # Therefore, only move p with reasonable conditions (in this order): |
|
1686 | 1685 | # 1. use dest, if dest is a descendant of (p or one of p's successors)
|
1687 | 1686 | # 2. use p's rebased result, if p is rebased (state[p] > 0) |
|
1688 | 1687 | # |
|
1689 | 1688 | # Comparing with adjustdest, the logic here does some additional work: |
|
1690 | 1689 | # 1. decide which parents will not be moved towards dest |
|
1691 | 1690 | # 2. if the above decision is "no", should a parent still be moved |
|
1692 | 1691 | # because it was rebased? |
|
1693 | 1692 | # |
|
1694 | 1693 | # For example: |
|
1695 | 1694 | # |
|
1696 | 1695 | # C # "rebase -r C -d D" is an error since none of the parents |
|
1697 | 1696 | # /| # can be moved. "rebase -r B+C -d D" will move C's parent |
|
1698 | 1697 | # A B D # B (using rule "2."), since B will be rebased. |
|
1699 | 1698 | # |
|
1700 | 1699 | # The loop tries not to rely on the fact that a Mercurial node has
|
1701 | 1700 | # at most 2 parents. |
|
1702 | 1701 | for i, p in enumerate(oldps): |
|
1703 | 1702 | np = p # new parent |
|
1704 | 1703 | if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)): |
|
1705 | 1704 | np = dests[i] |
|
1706 | 1705 | elif p in state and state[p] > 0: |
|
1707 | 1706 | np = state[p] |
|
1708 | 1707 | |
|
1709 | 1708 | # If one parent becomes an ancestor of the other, drop the ancestor |
|
1710 | 1709 | for j, x in enumerate(newps[:i]): |
|
1711 | 1710 | if x == nullrev: |
|
1712 | 1711 | continue |
|
1713 | 1712 | if isancestor(np, x): # CASE-1 |
|
1714 | 1713 | np = nullrev |
|
1715 | 1714 | elif isancestor(x, np): # CASE-2 |
|
1716 | 1715 | newps[j] = np |
|
1717 | 1716 | np = nullrev |
|
1718 | 1717 | # New parents forming an ancestor relationship does not |
|
1719 | 1718 | # mean the old parents have a similar relationship. Do not |
|
1720 | 1719 | # set bases[x] to nullrev. |
|
1721 | 1720 | bases[j], bases[i] = bases[i], bases[j] |
|
1722 | 1721 | |
|
1723 | 1722 | newps[i] = np |
|
1724 | 1723 | |
|
1725 | 1724 | # "rebasenode" updates to new p1, and the old p1 will be used as merge |
|
1726 | 1725 | # base. If only p2 changes, merging using unchanged p1 as merge base is |
|
1727 | 1726 | # suboptimal. Therefore swap parents to make the merge sane. |
|
1728 | 1727 | if newps[1] != nullrev and oldps[0] == newps[0]: |
|
1729 | 1728 | assert len(newps) == 2 and len(oldps) == 2 |
|
1730 | 1729 | newps.reverse() |
|
1731 | 1730 | bases.reverse() |
|
1732 | 1731 | |
|
1733 | 1732 | # If no parent changed, it might be an error because we failed to make rev

1734 | 1733 | # a descendant of the requested dest. This can happen, for example:
|
1735 | 1734 | # |
|
1736 | 1735 | # C # rebase -r C -d D |
|
1737 | 1736 | # /| # None of A and B will be changed to D and rebase fails. |
|
1738 | 1737 | # A B D |
|
1739 | 1738 | if set(newps) == set(oldps) and dest not in newps: |
|
1740 | 1739 | raise error.Abort( |
|
1741 | 1740 | _( |
|
1742 | 1741 | b'cannot rebase %d:%s without ' |
|
1743 | 1742 | b'moving at least one of its parents' |
|
1744 | 1743 | ) |
|
1745 | 1744 | % (rev, repo[rev]) |
|
1746 | 1745 | ) |
|
1747 | 1746 | |
|
1748 | 1747 | # Source should not be ancestor of dest. The check here guarantees it's |
|
1749 | 1748 | # impossible. With multi-dest, the initial check does not cover complex |
|
1750 | 1749 | # cases since we don't have abstractions to dry-run rebase cheaply. |
|
1751 | 1750 | if any(p != nullrev and isancestor(rev, p) for p in newps): |
|
1752 | 1751 | raise error.Abort(_(b'source is ancestor of destination')) |
|
1753 | 1752 | |
|
1754 | 1753 | # Check if the merge will contain unwanted changes. That may happen if |
|
1755 | 1754 | # there are multiple special (non-changelog ancestor) merge bases, which |
|
1756 | 1755 | # cannot be handled well by the 3-way merge algorithm. For example: |
|
1757 | 1756 | # |
|
1758 | 1757 | # F |
|
1759 | 1758 | # /| |
|
1760 | 1759 | # D E # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen |
|
1761 | 1760 | # | | # as merge base, the difference between D and F will include |
|
1762 | 1761 | # B C # C, so the rebased F will contain C surprisingly. If "E" was |
|
1763 | 1762 | # |/ # chosen, the rebased F will contain B. |
|
1764 | 1763 | # A Z |
|
1765 | 1764 | # |
|
1766 | 1765 | # But our merge base candidates (D and E in above case) could still be |
|
1767 | 1766 | # better than the default (ancestor(F, Z) == null). Therefore still |
|
1768 | 1767 | # pick one (so choose p1 above). |
|
1769 | 1768 | if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1: |
|
1770 | 1769 | unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i] |
|
1771 | 1770 | for i, base in enumerate(bases): |
|
1772 | 1771 | if base == nullrev or base in newps: |
|
1773 | 1772 | continue |
|
1774 | 1773 | # Revisions in the side (not chosen as merge base) branch that |
|
1775 | 1774 | # might contain "surprising" contents |
|
1776 | 1775 | other_bases = set(bases) - {base} |
|
1777 | 1776 | siderevs = list( |
|
1778 | 1777 | repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest) |
|
1779 | 1778 | ) |
|
1780 | 1779 | |
|
1781 | 1780 | # If those revisions are covered by rebaseset, the result is good. |
|
1782 | 1781 | # A merge in rebaseset would be considered to cover its ancestors. |
|
1783 | 1782 | if siderevs: |
|
1784 | 1783 | rebaseset = [ |
|
1785 | 1784 | r for r, d in state.items() if d > 0 and r not in obsskipped |
|
1786 | 1785 | ] |
|
1787 | 1786 | merges = [ |
|
1788 | 1787 | r for r in rebaseset if cl.parentrevs(r)[1] != nullrev |
|
1789 | 1788 | ] |
|
1790 | 1789 | unwanted[i] = list( |
|
1791 | 1790 | repo.revs( |
|
1792 | 1791 | b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset |
|
1793 | 1792 | ) |
|
1794 | 1793 | ) |
|
1795 | 1794 | |
|
1796 | 1795 | if any(revs is not None for revs in unwanted): |
|
1797 | 1796 | # Choose a merge base that has a minimal number of unwanted revs. |
|
1798 | 1797 | l, i = min( |
|
1799 | 1798 | (len(revs), i) |
|
1800 | 1799 | for i, revs in enumerate(unwanted) |
|
1801 | 1800 | if revs is not None |
|
1802 | 1801 | ) |
|
1803 | 1802 | |
|
1804 | 1803 | # The merge will include unwanted revisions. Abort now. Revisit this if |
|
1805 | 1804 | # we have a more advanced merge algorithm that handles multiple bases. |
|
1806 | 1805 | if l > 0: |
|
1807 | 1806 | unwanteddesc = _(b' or ').join( |
|
1808 | 1807 | ( |
|
1809 | 1808 | b', '.join(b'%d:%s' % (r, repo[r]) for r in revs) |
|
1810 | 1809 | for revs in unwanted |
|
1811 | 1810 | if revs is not None |
|
1812 | 1811 | ) |
|
1813 | 1812 | ) |
|
1814 | 1813 | raise error.Abort( |
|
1815 | 1814 | _(b'rebasing %d:%s will include unwanted changes from %s') |
|
1816 | 1815 | % (rev, repo[rev], unwanteddesc) |
|
1817 | 1816 | ) |
|
1818 | 1817 | |
|
1819 | 1818 | # newps[0] should match merge base if possible. Currently, if newps[i] |
|
1820 | 1819 | # is nullrev, the only case is that one of newps[i] and newps[j] (j < i)

1821 | 1820 | # is the other's ancestor. In that case, it's fine to not swap newps here.
|
1822 | 1821 | # (see CASE-1 and CASE-2 above) |
|
1823 | 1822 | if i != 0: |
|
1824 | 1823 | if newps[i] != nullrev: |
|
1825 | 1824 | newps[0], newps[i] = newps[i], newps[0] |
|
1826 | 1825 | bases[0], bases[i] = bases[i], bases[0] |
|
1827 | 1826 | |
|
1828 | 1827 | # "rebasenode" updates to new p1, use the corresponding merge base. |
|
1829 | 1828 | base = bases[0] |
|
1830 | 1829 | |
|
1831 | 1830 | repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps)) |
|
1832 | 1831 | |
|
1833 | 1832 | return newps[0], newps[1], base |
|
1834 | 1833 | |
|
1835 | 1834 | |
|
1836 | 1835 | def isagitpatch(repo, patchname): |
|
1837 | 1836 | """Return true if the given patch is in git format""" |
|
1838 | 1837 | mqpatch = os.path.join(repo.mq.path, patchname) |
|
1839 | 1838 | for line in patch.linereader(open(mqpatch, b'rb')): |
|
1840 | 1839 | if line.startswith(b'diff --git'): |
|
1841 | 1840 | return True |
|
1842 | 1841 | return False |
|
1843 | 1842 | |
|
1844 | 1843 | |
|
1845 | 1844 | def updatemq(repo, state, skipped, **opts): |
|
1846 | 1845 | """Update rebased mq patches - finalize and then import them""" |
|
1847 | 1846 | mqrebase = {} |
|
1848 | 1847 | mq = repo.mq |
|
1849 | 1848 | original_series = mq.fullseries[:] |
|
1850 | 1849 | skippedpatches = set() |
|
1851 | 1850 | |
|
1852 | 1851 | for p in mq.applied: |
|
1853 | 1852 | rev = repo[p.node].rev() |
|
1854 | 1853 | if rev in state: |
|
1855 | 1854 | repo.ui.debug( |
|
1856 | 1855 | b'revision %d is an mq patch (%s), finalize it.\n' |
|
1857 | 1856 | % (rev, p.name) |
|
1858 | 1857 | ) |
|
1859 | 1858 | mqrebase[rev] = (p.name, isagitpatch(repo, p.name)) |
|
1860 | 1859 | else: |
|
1861 | 1860 | # Applied but not rebased, not sure this should happen |
|
1862 | 1861 | skippedpatches.add(p.name) |
|
1863 | 1862 | |
|
1864 | 1863 | if mqrebase: |
|
1865 | 1864 | mq.finish(repo, mqrebase.keys()) |
|
1866 | 1865 | |
|
1867 | 1866 | # We must start import from the newest revision |
|
1868 | 1867 | for rev in sorted(mqrebase, reverse=True): |
|
1869 | 1868 | if rev not in skipped: |
|
1870 | 1869 | name, isgit = mqrebase[rev] |
|
1871 | 1870 | repo.ui.note( |
|
1872 | 1871 | _(b'updating mq patch %s to %d:%s\n') |
|
1873 | 1872 | % (name, state[rev], repo[state[rev]]) |
|
1874 | 1873 | ) |
|
1875 | 1874 | mq.qimport( |
|
1876 | 1875 | repo, |
|
1877 | 1876 | (), |
|
1878 | 1877 | patchname=name, |
|
1879 | 1878 | git=isgit, |
|
1880 | 1879 | rev=[b"%d" % state[rev]], |
|
1881 | 1880 | ) |
|
1882 | 1881 | else: |
|
1883 | 1882 | # Rebased and skipped |
|
1884 | 1883 | skippedpatches.add(mqrebase[rev][0]) |
|
1885 | 1884 | |
|
1886 | 1885 | # Patches were either applied and rebased and imported in |
|
1887 | 1886 | # order, applied and removed, or unapplied. Discard the removed
|
1888 | 1887 | # ones while preserving the original series order and guards. |
|
1889 | 1888 | newseries = [ |
|
1890 | 1889 | s |
|
1891 | 1890 | for s in original_series |
|
1892 | 1891 | if mq.guard_re.split(s, 1)[0] not in skippedpatches |
|
1893 | 1892 | ] |
|
1894 | 1893 | mq.fullseries[:] = newseries |
|
1895 | 1894 | mq.seriesdirty = True |
|
1896 | 1895 | mq.savedirty() |
|
1897 | 1896 | |
|
1898 | 1897 | |
|
1899 | 1898 | def storecollapsemsg(repo, collapsemsg): |
|
1900 | 1899 | """Store the collapse message to allow recovery""" |
|
1901 | 1900 | collapsemsg = collapsemsg or b'' |
|
1902 | 1901 | f = repo.vfs(b"last-message.txt", b"w") |
|
1903 | 1902 | f.write(b"%s\n" % collapsemsg) |
|
1904 | 1903 | f.close() |
|
1905 | 1904 | |
|
1906 | 1905 | |
|
1907 | 1906 | def clearcollapsemsg(repo): |
|
1908 | 1907 | """Remove collapse message file""" |
|
1909 | 1908 | repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True) |
|
1910 | 1909 | |
|
1911 | 1910 | |
|
1912 | 1911 | def restorecollapsemsg(repo, isabort): |
|
1913 | 1912 | """Restore previously stored collapse message""" |
|
1914 | 1913 | try: |
|
1915 | 1914 | f = repo.vfs(b"last-message.txt") |
|
1916 | 1915 | collapsemsg = f.readline().strip() |
|
1917 | 1916 | f.close() |
|
1918 | 1917 | except IOError as err: |
|
1919 | 1918 | if err.errno != errno.ENOENT: |
|
1920 | 1919 | raise |
|
1921 | 1920 | if isabort: |
|
1922 | 1921 | # Oh well, just abort like normal |
|
1923 | 1922 | collapsemsg = b'' |
|
1924 | 1923 | else: |
|
1925 | 1924 | raise error.Abort(_(b'missing .hg/last-message.txt for rebase')) |
|
1926 | 1925 | return collapsemsg |
|
1927 | 1926 | |
|
1928 | 1927 | |
|
1929 | 1928 | def clearstatus(repo): |
|
1930 | 1929 | """Remove the status files""" |
|
1931 | 1930 | # Make sure the active transaction won't write the state file |
|
1932 | 1931 | tr = repo.currenttransaction() |
|
1933 | 1932 | if tr: |
|
1934 | 1933 | tr.removefilegenerator(b'rebasestate') |
|
1935 | 1934 | repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True) |
|
1936 | 1935 | |
|
1937 | 1936 | |
|
1938 | 1937 | def sortsource(destmap): |
|
1939 | 1938 | """yield source revisions in an order such that we only rebase things once

1940 | 1939 | 

1941 | 1940 | If source and destination overlap, we should filter out revisions

1942 | 1941 | depending on other revisions which haven't been rebased yet.
|
1943 | 1942 | |
|
1944 | 1943 | Yield a sorted list of revisions each time. |
|
1945 | 1944 | |
|
1946 | 1945 | For example, when rebasing A onto B and B onto C, this function yields [B],

1947 | 1946 | then [A], indicating B needs to be rebased first.
|
1948 | 1947 | |
|
1949 | 1948 | Raise if there is a cycle so the rebase is impossible. |
|
1950 | 1949 | """ |
|
1951 | 1950 | srcset = set(destmap) |
|
1952 | 1951 | while srcset: |
|
1953 | 1952 | srclist = sorted(srcset) |
|
1954 | 1953 | result = [] |
|
1955 | 1954 | for r in srclist: |
|
1956 | 1955 | if destmap[r] not in srcset: |
|
1957 | 1956 | result.append(r) |
|
1958 | 1957 | if not result: |
|
1959 | 1958 | raise error.Abort(_(b'source and destination form a cycle')) |
|
1960 | 1959 | srcset -= set(result) |
|
1961 | 1960 | yield result |
|
1962 | 1961 | |
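# Worked example with hypothetical revision numbers: for
# destmap == {1: 2, 2: 3}, the first pass yields [2] (its destination, 3, is
# not itself being rebased), and the second pass yields [1] once 2 has left
# srcset. For destmap == {1: 2, 2: 1} no revision can ever be emitted, and
# the cycle Abort above is raised.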
|
1963 | 1962 | |
|
1964 | 1963 | def buildstate(repo, destmap, collapse): |
|
1965 | 1964 | '''Define which revisions are going to be rebased and where |
|
1966 | 1965 | |
|
1967 | 1966 | repo: repo |
|
1968 | 1967 | destmap: {srcrev: destrev} |
|
1969 | 1968 | ''' |
|
1970 | 1969 | rebaseset = destmap.keys() |
|
1971 | 1970 | originalwd = repo[b'.'].rev() |
|
1972 | 1971 | |
|
1973 | 1972 | # This check isn't strictly necessary, since mq detects commits over an |
|
1974 | 1973 | # applied patch. But it prevents messing up the working directory when |
|
1975 | 1974 | # a partially completed rebase is blocked by mq. |
|
1976 | 1975 | if b'qtip' in repo.tags(): |
|
1977 | 1976 | mqapplied = {repo[s.node].rev() for s in repo.mq.applied} |
|
1978 | 1977 | if set(destmap.values()) & mqapplied: |
|
1979 | 1978 | raise error.Abort(_(b'cannot rebase onto an applied mq patch')) |
|
1980 | 1979 | |
|
1981 | 1980 | # Get "cycle" error early by exhausting the generator. |
|
1982 | 1981 | sortedsrc = list(sortsource(destmap)) # a list of sorted revs |
|
1983 | 1982 | if not sortedsrc: |
|
1984 | 1983 | raise error.Abort(_(b'no matching revisions')) |
|
1985 | 1984 | |
|
1986 | 1985 | # Only check the first batch of revisions to rebase, i.e. those not depending

1987 | 1986 | # on others in the rebase set. This means "source is ancestor of destination"

1988 | 1987 | # is not checked here for the second (and following) batches. We rely on
|
1989 | 1988 | # "defineparents" to do that check. |
|
1990 | 1989 | roots = list(repo.set(b'roots(%ld)', sortedsrc[0])) |
|
1991 | 1990 | if not roots: |
|
1992 | 1991 | raise error.Abort(_(b'no matching revisions')) |
|
1993 | 1992 | |
|
1994 | 1993 | def revof(r): |
|
1995 | 1994 | return r.rev() |
|
1996 | 1995 | |
|
1997 | 1996 | roots = sorted(roots, key=revof) |
|
1998 | 1997 | state = dict.fromkeys(rebaseset, revtodo) |
|
1999 | 1998 | emptyrebase = len(sortedsrc) == 1 |
|
2000 | 1999 | for root in roots: |
|
2001 | 2000 | dest = repo[destmap[root.rev()]] |
|
2002 | 2001 | commonbase = root.ancestor(dest) |
|
2003 | 2002 | if commonbase == root: |
|
2004 | 2003 | raise error.Abort(_(b'source is ancestor of destination')) |
|
2005 | 2004 | if commonbase == dest: |
|
2006 | 2005 | wctx = repo[None] |
|
2007 | 2006 | if dest == wctx.p1(): |
|
2008 | 2007 | # when rebasing to '.', it will use the current wd branch name |
|
2009 | 2008 | samebranch = root.branch() == wctx.branch() |
|
2010 | 2009 | else: |
|
2011 | 2010 | samebranch = root.branch() == dest.branch() |
|
2012 | 2011 | if not collapse and samebranch and dest in root.parents(): |
|
2013 | 2012 | # mark the revision as done by setting its new revision |
|
2014 | 2013 | # equal to its old (current) revisions |
|
2015 | 2014 | state[root.rev()] = root.rev() |
|
2016 | 2015 | repo.ui.debug(b'source is a child of destination\n') |
|
2017 | 2016 | continue |
|
2018 | 2017 | |
|
2019 | 2018 | emptyrebase = False |
|
2020 | 2019 | repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root)) |
|
2021 | 2020 | if emptyrebase: |
|
2022 | 2021 | return None |
|
2023 | 2022 | for rev in sorted(state): |
|
2024 | 2023 | parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev] |
|
2025 | 2024 | # if all parents of this revision are done, then so is this revision |
|
2026 | 2025 | if parents and all((state.get(p) == p for p in parents)): |
|
2027 | 2026 | state[rev] = rev |
|
2028 | 2027 | return originalwd, destmap, state |
|
2029 | 2028 | |
|
2030 | 2029 | |
|
2031 | 2030 | def clearrebased( |
|
2032 | 2031 | ui, |
|
2033 | 2032 | repo, |
|
2034 | 2033 | destmap, |
|
2035 | 2034 | state, |
|
2036 | 2035 | skipped, |
|
2037 | 2036 | collapsedas=None, |
|
2038 | 2037 | keepf=False, |
|
2039 | 2038 | fm=None, |
|
2040 | 2039 | backup=True, |
|
2041 | 2040 | ): |
|
2042 | 2041 | """dispose of rebased revision at the end of the rebase |
|
2043 | 2042 | |
|
2044 | 2043 | If `collapsedas` is not None, the rebase was a collapse whose result is the
|
2045 | 2044 | `collapsedas` node. |
|
2046 | 2045 | |
|
2047 | 2046 | If `keepf` is True, the rebase has --keep set and no nodes should be
|
2048 | 2047 | removed (but bookmarks still need to be moved). |
|
2049 | 2048 | |
|
2050 | 2049 | If `backup` is False, no backup will be stored when stripping rebased |
|
2051 | 2050 | revisions. |
|
2052 | 2051 | """ |
|
2053 | 2052 | tonode = repo.changelog.node |
|
2054 | 2053 | replacements = {} |
|
2055 | 2054 | moves = {} |
|
2056 | 2055 | stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt) |
|
2057 | 2056 | |
|
2058 | 2057 | collapsednodes = [] |
|
2059 | 2058 | for rev, newrev in sorted(state.items()): |
|
2060 | 2059 | if newrev >= 0 and newrev != rev: |
|
2061 | 2060 | oldnode = tonode(rev) |
|
2062 | 2061 | newnode = collapsedas or tonode(newrev) |
|
2063 | 2062 | moves[oldnode] = newnode |
|
2064 | 2063 | succs = None |
|
2065 | 2064 | if rev in skipped: |
|
2066 | 2065 | if stripcleanup or not repo[rev].obsolete(): |
|
2067 | 2066 | succs = () |
|
2068 | 2067 | elif collapsedas: |
|
2069 | 2068 | collapsednodes.append(oldnode) |
|
2070 | 2069 | else: |
|
2071 | 2070 | succs = (newnode,) |
|
2072 | 2071 | if succs is not None: |
|
2073 | 2072 | replacements[(oldnode,)] = succs |
|
2074 | 2073 | if collapsednodes: |
|
2075 | 2074 | replacements[tuple(collapsednodes)] = (collapsedas,) |
|
2076 | 2075 | if fm: |
|
2077 | 2076 | hf = fm.hexfunc |
|
2078 | 2077 | fl = fm.formatlist |
|
2079 | 2078 | fd = fm.formatdict |
|
2080 | 2079 | changes = {} |
|
2081 | 2080 | for oldns, newn in pycompat.iteritems(replacements): |
|
2082 | 2081 | for oldn in oldns: |
|
2083 | 2082 | changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node') |
|
2084 | 2083 | nodechanges = fd(changes, key=b"oldnode", value=b"newnodes") |
|
2085 | 2084 | fm.data(nodechanges=nodechanges) |
|
2086 | 2085 | if keepf: |
|
2087 | 2086 | replacements = {} |
|
2088 | 2087 | scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup) |
|
2089 | 2088 | |
|
2090 | 2089 | |
|
2091 | 2090 | def pullrebase(orig, ui, repo, *args, **opts): |
|
2092 | 2091 | """Call rebase after pull if the latter has been invoked with --rebase""" |
|
2093 | 2092 | if opts.get('rebase'): |
|
2094 | 2093 | if ui.configbool(b'commands', b'rebase.requiredest'): |
|
2095 | 2094 | msg = _(b'rebase destination required by configuration') |
|
2096 | 2095 | hint = _(b'use hg pull followed by hg rebase -d DEST') |
|
2097 | 2096 | raise error.Abort(msg, hint=hint) |
|
2098 | 2097 | |
|
2099 | 2098 | with repo.wlock(), repo.lock(): |
|
2100 | 2099 | if opts.get('update'): |
|
2101 | 2100 | del opts['update'] |
|
2102 | 2101 | ui.debug( |
|
2103 | 2102 | b'--update and --rebase are not compatible, ignoring ' |
|
2104 | 2103 | b'the update flag\n' |
|
2105 | 2104 | ) |
|
2106 | 2105 | |
|
2107 | 2106 | cmdutil.checkunfinished(repo, skipmerge=True) |
|
2108 | 2107 | cmdutil.bailifchanged( |
|
2109 | 2108 | repo, |
|
2110 | 2109 | hint=_( |
|
2111 | 2110 | b'cannot pull with rebase: ' |
|
2112 | 2111 | b'please commit or shelve your changes first' |
|
2113 | 2112 | ), |
|
2114 | 2113 | ) |
|
2115 | 2114 | |
|
2116 | 2115 | revsprepull = len(repo) |
|
2117 | 2116 | origpostincoming = commands.postincoming |
|
2118 | 2117 | |
|
2119 | 2118 | def _dummy(*args, **kwargs): |
|
2120 | 2119 | pass |
|
2121 | 2120 | |
|
2122 | 2121 | commands.postincoming = _dummy |
|
2123 | 2122 | try: |
|
2124 | 2123 | ret = orig(ui, repo, *args, **opts) |
|
2125 | 2124 | finally: |
|
2126 | 2125 | commands.postincoming = origpostincoming |
|
2127 | 2126 | revspostpull = len(repo) |
|
2128 | 2127 | if revspostpull > revsprepull: |
|
2129 | 2128 | # the --rev option from pull conflicts with rebase's own --rev,

2130 | 2129 | # so drop it
|
2131 | 2130 | if 'rev' in opts: |
|
2132 | 2131 | del opts['rev'] |
|
2133 | 2132 | # positional argument from pull conflicts with rebase's own |
|
2134 | 2133 | # --source. |
|
2135 | 2134 | if 'source' in opts: |
|
2136 | 2135 | del opts['source'] |
|
2137 | 2136 | # revsprepull is the len of the repo, not revnum of tip. |
|
2138 | 2137 | destspace = list(repo.changelog.revs(start=revsprepull)) |
|
2139 | 2138 | opts['_destspace'] = destspace |
|
2140 | 2139 | try: |
|
2141 | 2140 | rebase(ui, repo, **opts) |
|
2142 | 2141 | except error.NoMergeDestAbort: |
|
2143 | 2142 | # we can maybe update instead |
|
2144 | 2143 | rev, _a, _b = destutil.destupdate(repo) |
|
2145 | 2144 | if rev == repo[b'.'].rev(): |
|
2146 | 2145 | ui.status(_(b'nothing to rebase\n')) |
|
2147 | 2146 | else: |
|
2148 | 2147 | ui.status(_(b'nothing to rebase - updating instead\n')) |
|
2149 | 2148 | # not passing argument to get the bare update behavior |
|
2150 | 2149 | # with warning and trumpets |
|
2151 | 2150 | commands.update(ui, repo) |
|
2152 | 2151 | else: |
|
2153 | 2152 | if opts.get('tool'): |
|
2154 | 2153 | raise error.Abort(_(b'--tool can only be used with --rebase')) |
|
2155 | 2154 | ret = orig(ui, repo, *args, **opts) |
|
2156 | 2155 | |
|
2157 | 2156 | return ret |
|
2158 | 2157 | |
|
2159 | 2158 | |
|
2160 | 2159 | def _filterobsoleterevs(repo, revs): |
|
2161 | 2160 | """returns a set of the obsolete revisions in revs""" |
|
2162 | 2161 | return {r for r in revs if repo[r].obsolete()} |
|
2163 | 2162 | |
|
2164 | 2163 | |
|
2165 | 2164 | def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap): |
|
2166 | 2165 | """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination, obsoleteextinctsuccessors).
|
2167 | 2166 | |
|
2168 | 2167 | `obsoletenotrebased` is a mapping obsolete => successor for all
|
2169 | 2168 | obsolete nodes to be rebased given in `rebaseobsrevs`. |
|
2170 | 2169 | |
|
2171 | 2170 | `obsoletewithoutsuccessorindestination` is a set of obsolete revisions
|
2172 | 2171 | without a successor in destination. |
|
2173 | 2172 | |
|
2174 | 2173 | `obsoleteextinctsuccessors` is a set of obsolete revisions with only |
|
2175 | 2174 | obsolete successors. |
|
2176 | 2175 | """ |
|
2177 | 2176 | obsoletenotrebased = {} |
|
2178 | 2177 | obsoletewithoutsuccessorindestination = set() |
|
2179 | 2178 | obsoleteextinctsuccessors = set() |
|
2180 | 2179 | |
|
2181 | 2180 | assert repo.filtername is None |
|
2182 | 2181 | cl = repo.changelog |
|
2183 | 2182 | get_rev = cl.index.get_rev |
|
2184 | 2183 | extinctrevs = set(repo.revs(b'extinct()')) |
|
2185 | 2184 | for srcrev in rebaseobsrevs: |
|
2186 | 2185 | srcnode = cl.node(srcrev) |
|
2187 | 2186 | # XXX: more advanced APIs are required to handle split correctly |
|
2188 | 2187 | successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode])) |
|
2189 | 2188 | # obsutil.allsuccessors includes node itself |
|
2190 | 2189 | successors.remove(srcnode) |
|
2191 | 2190 | succrevs = {get_rev(s) for s in successors} |
|
2192 | 2191 | succrevs.discard(None) |
|
2193 | 2192 | if succrevs.issubset(extinctrevs): |
|
2194 | 2193 | # all successors are extinct |
|
2195 | 2194 | obsoleteextinctsuccessors.add(srcrev) |
|
2196 | 2195 | if not successors: |
|
2197 | 2196 | # no successor |
|
2198 | 2197 | obsoletenotrebased[srcrev] = None |
|
2199 | 2198 | else: |
|
2200 | 2199 | dstrev = destmap[srcrev] |
|
2201 | 2200 | for succrev in succrevs: |
|
2202 | 2201 | if cl.isancestorrev(succrev, dstrev): |
|
2203 | 2202 | obsoletenotrebased[srcrev] = succrev |
|
2204 | 2203 | break |
|
2205 | 2204 | else: |
|
2206 | 2205 | # If 'srcrev' has a successor in rebase set but none in |
|
2207 | 2206 | # destination (which would be caught above), we shall skip it
|
2208 | 2207 | # and its descendants to avoid divergence. |
|
2209 | 2208 | if srcrev in extinctrevs or any(s in destmap for s in succrevs): |
|
2210 | 2209 | obsoletewithoutsuccessorindestination.add(srcrev) |
|
2211 | 2210 | |
|
2212 | 2211 | return ( |
|
2213 | 2212 | obsoletenotrebased, |
|
2214 | 2213 | obsoletewithoutsuccessorindestination, |
|
2215 | 2214 | obsoleteextinctsuccessors, |
|
2216 | 2215 | ) |
|
2217 | 2216 | |
|
2218 | 2217 | |
|
2219 | 2218 | def abortrebase(ui, repo): |
|
2220 | 2219 | with repo.wlock(), repo.lock(): |
|
2221 | 2220 | rbsrt = rebaseruntime(repo, ui) |
|
2222 | 2221 | rbsrt._prepareabortorcontinue(isabort=True) |
|
2223 | 2222 | |
|
2224 | 2223 | |
|
2225 | 2224 | def continuerebase(ui, repo): |
|
2226 | 2225 | with repo.wlock(), repo.lock(): |
|
2227 | 2226 | rbsrt = rebaseruntime(repo, ui) |
|
2228 | 2227 | ms = mergestatemod.mergestate.read(repo) |
|
2229 | 2228 | mergeutil.checkunresolved(ms) |
|
2230 | 2229 | retcode = rbsrt._prepareabortorcontinue(isabort=False) |
|
2231 | 2230 | if retcode is not None: |
|
2232 | 2231 | return retcode |
|
2233 | 2232 | rbsrt._performrebase(None) |
|
2234 | 2233 | rbsrt._finishrebase() |
|
2235 | 2234 | |
|
2236 | 2235 | |
|
2237 | 2236 | def summaryhook(ui, repo): |
|
2238 | 2237 | if not repo.vfs.exists(b'rebasestate'): |
|
2239 | 2238 | return |
|
2240 | 2239 | try: |
|
2241 | 2240 | rbsrt = rebaseruntime(repo, ui, {}) |
|
2242 | 2241 | rbsrt.restorestatus() |
|
2243 | 2242 | state = rbsrt.state |
|
2244 | 2243 | except error.RepoLookupError: |
|
2245 | 2244 | # i18n: column positioning for "hg summary" |
|
2246 | 2245 | msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n') |
|
2247 | 2246 | ui.write(msg) |
|
2248 | 2247 | return |
|
2249 | 2248 | numrebased = len([i for i in pycompat.itervalues(state) if i >= 0]) |
|
2250 | 2249 | # i18n: column positioning for "hg summary" |
|
2251 | 2250 | ui.write( |
|
2252 | 2251 | _(b'rebase: %s, %s (rebase --continue)\n') |
|
2253 | 2252 | % ( |
|
2254 | 2253 | ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased, |
|
2255 | 2254 | ui.label(_(b'%d remaining'), b'rebase.remaining') |
|
2256 | 2255 | % (len(state) - numrebased), |
|
2257 | 2256 | ) |
|
2258 | 2257 | ) |
|
2259 | 2258 | |
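# The hook above renders a line in ``hg summary`` along these lines
# (counts are illustrative):
#
#   rebase: 2 rebased, 1 remaining (rebase --continue)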
|
2260 | 2259 | |
|
2261 | 2260 | def uisetup(ui): |
|
2262 | 2261 | # Replace pull with a decorator to provide --rebase option |
|
2263 | 2262 | entry = extensions.wrapcommand(commands.table, b'pull', pullrebase) |
|
2264 | 2263 | entry[1].append( |
|
2265 | 2264 | (b'', b'rebase', None, _(b"rebase working directory to branch head")) |
|
2266 | 2265 | ) |
|
2267 | 2266 | entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase"))) |
|
2268 | 2267 | cmdutil.summaryhooks.add(b'rebase', summaryhook) |
|
2269 | 2268 | statemod.addunfinished( |
|
2270 | 2269 | b'rebase', |
|
2271 | 2270 | fname=b'rebasestate', |
|
2272 | 2271 | stopflag=True, |
|
2273 | 2272 | continueflag=True, |
|
2274 | 2273 | abortfunc=abortrebase, |
|
2275 | 2274 | continuefunc=continuerebase, |
|
2276 | 2275 | ) |
@@ -1,440 +1,440 b'' | |||
|
1 | 1 | # sparse.py - allow sparse checkouts of the working directory |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2014 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """allow sparse checkouts of the working directory (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | (This extension is not yet protected by backwards compatibility |
|
11 | 11 | guarantees. Any aspect may break in future releases until this |
|
12 | 12 | notice is removed.) |
|
13 | 13 | |
|
14 | 14 | This extension allows the working directory to only consist of a |
|
15 | 15 | subset of files for the revision. This allows specific files or |
|
16 | 16 | directories to be explicitly included or excluded. Many repository |
|
17 | 17 | operations have performance proportional to the number of files in |
|
18 | 18 | the working directory. So only realizing a subset of files in the |
|
19 | 19 | working directory can improve performance. |
|
20 | 20 | |
|
21 | 21 | Sparse Config Files |
|
22 | 22 | ------------------- |
|
23 | 23 | |
|
24 | 24 | The set of files that are part of a sparse checkout are defined by |
|
25 | 25 | a sparse config file. The file defines 3 things: includes (files to |
|
26 | 26 | include in the sparse checkout), excludes (files to exclude from the |
|
27 | 27 | sparse checkout), and profiles (links to other config files). |
|
28 | 28 | |
|
29 | 29 | The file format is newline delimited. Empty lines and lines beginning |
|
30 | 30 | with ``#`` are ignored. |
|
31 | 31 | |
|
32 | 32 | Lines beginning with ``%include `` denote another sparse config file |
|
33 | 33 | to include. e.g. ``%include tests.sparse``. The filename is relative |
|
34 | 34 | to the repository root. |
|
35 | 35 | |
|
36 | 36 | The special lines ``[include]`` and ``[exclude]`` denote the section |
|
37 | 37 | for includes and excludes that follow, respectively. It is illegal to |
|
38 | 38 | have ``[include]`` after ``[exclude]``. |
|
39 | 39 | |
|
40 | 40 | Non-special lines resemble file patterns to be added to either includes |
|
41 | 41 | or excludes. The syntax of these lines is documented by :hg:`help patterns`. |
|
42 | 42 | Patterns are interpreted as ``glob:`` by default and match against the |
|
43 | 43 | root of the repository. |
|
44 | 44 | |
|
45 | 45 | Exclusion patterns take precedence over inclusion patterns. So even |
|
46 | 46 | if a file is explicitly included, an ``[exclude]`` entry can remove it. |
|
47 | 47 | |
|
48 | 48 | For example, say you have a repository with 3 directories, ``frontend/``, |
|
49 | 49 | ``backend/``, and ``tools/``. ``frontend/`` and ``backend/`` correspond |
|
50 | 50 | to different projects and it is uncommon for someone working on one |
|
51 | 51 | to need the files for the other. But ``tools/`` contains files shared |
|
52 | 52 | between both projects. Your sparse config files may resemble:: |
|
53 | 53 | |
|
54 | 54 | # frontend.sparse |
|
55 | 55 | frontend/** |
|
56 | 56 | tools/** |
|
57 | 57 | |
|
58 | 58 | # backend.sparse |
|
59 | 59 | backend/** |
|
60 | 60 | tools/** |
|
61 | 61 | |
|
62 | 62 | Say the backend grows in size. Or there's a directory with thousands |
|
63 | 63 | of files you wish to exclude. You can modify the profile to exclude |
|
64 | 64 | certain files:: |
|
65 | 65 | |
|
66 | 66 | [include] |
|
67 | 67 | backend/** |
|
68 | 68 | tools/** |
|
69 | 69 | |
|
70 | 70 | [exclude] |
|
71 | 71 | tools/tests/** |
|
72 | 72 | """ |
|
73 | 73 | |
|
74 | 74 | from __future__ import absolute_import |
|
75 | 75 | |
|
76 | 76 | from mercurial.i18n import _ |
|
77 | 77 | from mercurial.pycompat import setattr |
|
78 | 78 | from mercurial import ( |
|
79 | 79 | commands, |
|
80 | 80 | dirstate, |
|
81 | 81 | error, |
|
82 | 82 | extensions, |
|
83 | hg, | |
|
84 | 83 | logcmdutil, |
|
85 | 84 | match as matchmod, |
|
85 | merge as mergemod, | |
|
86 | 86 | pycompat, |
|
87 | 87 | registrar, |
|
88 | 88 | sparse, |
|
89 | 89 | util, |
|
90 | 90 | ) |
|
91 | 91 | |
|
92 | 92 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
93 | 93 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
94 | 94 | # be specifying the version(s) of Mercurial they are tested with, or |
|
95 | 95 | # leave the attribute unspecified. |
|
96 | 96 | testedwith = b'ships-with-hg-core' |
|
97 | 97 | |
|
98 | 98 | cmdtable = {} |
|
99 | 99 | command = registrar.command(cmdtable) |
|
100 | 100 | |
|
101 | 101 | |
|
102 | 102 | def extsetup(ui): |
|
103 | 103 | sparse.enabled = True |
|
104 | 104 | |
|
105 | 105 | _setupclone(ui) |
|
106 | 106 | _setuplog(ui) |
|
107 | 107 | _setupadd(ui) |
|
108 | 108 | _setupdirstate(ui) |
|
109 | 109 | |
|
110 | 110 | |
|
111 | 111 | def replacefilecache(cls, propname, replacement): |
|
112 | 112 | """Replace a filecache property with a new class. This allows changing the |
|
113 | 113 | cache invalidation condition.""" |
|
114 | 114 | origcls = cls |
|
115 | 115 | assert callable(replacement) |
|
116 | 116 | while cls is not object: |
|
117 | 117 | if propname in cls.__dict__: |
|
118 | 118 | orig = cls.__dict__[propname] |
|
119 | 119 | setattr(cls, propname, replacement(orig)) |
|
120 | 120 | break |
|
121 | 121 | cls = cls.__bases__[0] |
|
122 | 122 | |
|
123 | 123 | if cls is object: |
|
124 | 124 | raise AttributeError( |
|
125 | 125 | _(b"type '%s' has no property '%s'") % (origcls, propname) |
|
126 | 126 | ) |
|
127 | 127 | |
|
128 | 128 | |
|
129 | 129 | def _setuplog(ui): |
|
130 | 130 | entry = commands.table[b'log|history'] |
|
131 | 131 | entry[1].append( |
|
132 | 132 | ( |
|
133 | 133 | b'', |
|
134 | 134 | b'sparse', |
|
135 | 135 | None, |
|
136 | 136 | b"limit to changesets affecting the sparse checkout", |
|
137 | 137 | ) |
|
138 | 138 | ) |
|
139 | 139 | |
|
140 | 140 | def _initialrevs(orig, repo, wopts): |
|
141 | 141 | revs = orig(repo, wopts) |
|
142 | 142 | if wopts.opts.get(b'sparse'): |
|
143 | 143 | sparsematch = sparse.matcher(repo) |
|
144 | 144 | |
|
145 | 145 | def ctxmatch(rev): |
|
146 | 146 | ctx = repo[rev] |
|
147 | 147 | return any(f for f in ctx.files() if sparsematch(f)) |
|
148 | 148 | |
|
149 | 149 | revs = revs.filter(ctxmatch) |
|
150 | 150 | return revs |
|
151 | 151 | |
|
152 | 152 | extensions.wrapfunction(logcmdutil, b'_initialrevs', _initialrevs) |
|
153 | 153 | |
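# With the flag registered above, an invocation such as (illustrative)
#
#   hg log --sparse
#
# keeps only changesets touching at least one file matched by the active
# sparse configuration, via the ctxmatch() predicate.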
|
154 | 154 | |
|
155 | 155 | def _clonesparsecmd(orig, ui, repo, *args, **opts): |
|
156 | 156 | include_pat = opts.get('include') |
|
157 | 157 | exclude_pat = opts.get('exclude') |
|
158 | 158 | enableprofile_pat = opts.get('enable_profile') |
|
159 | 159 | narrow_pat = opts.get('narrow') |
|
160 | 160 | include = exclude = enableprofile = False |
|
161 | 161 | if include_pat: |
|
162 | 162 | pat = include_pat |
|
163 | 163 | include = True |
|
164 | 164 | if exclude_pat: |
|
165 | 165 | pat = exclude_pat |
|
166 | 166 | exclude = True |
|
167 | 167 | if enableprofile_pat: |
|
168 | 168 | pat = enableprofile_pat |
|
169 | 169 | enableprofile = True |
|
170 | 170 | if sum([include, exclude, enableprofile]) > 1: |
|
171 | 171 | raise error.Abort(_(b"too many flags specified.")) |
|
172 | 172 | # if --narrow is passed, it means they are includes and excludes for narrow |
|
173 | 173 | # clone |
|
174 | 174 | if not narrow_pat and (include or exclude or enableprofile): |
|
175 | 175 | |
|
176 | def clonesparse(orig, | 

176 | def clonesparse(orig, ctx, *args, **kwargs): | 
|
177 | 177 | sparse.updateconfig( |
|
178 | 

178 | ctx.repo().unfiltered(), | 
|
179 | 179 | pat, |
|
180 | 180 | {}, |
|
181 | 181 | include=include, |
|
182 | 182 | exclude=exclude, |
|
183 | 183 | enableprofile=enableprofile, |
|
184 | 184 | usereporootpaths=True, |
|
185 | 185 | ) |
|
186 | return orig( | 

186 | return orig(ctx, *args, **kwargs) | 
|
187 | 187 | |
|
188 | extensions.wrapfunction( | 

188 | extensions.wrapfunction(mergemod, b'update', clonesparse) | 
|
189 | 189 | return orig(ui, repo, *args, **opts) |
|
190 | 190 | |
|
191 | 191 | |
|
192 | 192 | def _setupclone(ui): |
|
193 | 193 | entry = commands.table[b'clone'] |
|
194 | 194 | entry[1].append((b'', b'enable-profile', [], b'enable a sparse profile')) |
|
195 | 195 | entry[1].append((b'', b'include', [], b'include sparse pattern')) |
|
196 | 196 | entry[1].append((b'', b'exclude', [], b'exclude sparse pattern')) |
|
197 | 197 | extensions.wrapcommand(commands.table, b'clone', _clonesparsecmd) |
|
198 | 198 | |
|
199 | 199 | |
|
200 | 200 | def _setupadd(ui): |
|
201 | 201 | entry = commands.table[b'add'] |
|
202 | 202 | entry[1].append( |
|
203 | 203 | ( |
|
204 | 204 | b's', |
|
205 | 205 | b'sparse', |
|
206 | 206 | None, |
|
207 | 207 | b'also include directories of added files in sparse config', |
|
208 | 208 | ) |
|
209 | 209 | ) |
|
210 | 210 | |
|
211 | 211 | def _add(orig, ui, repo, *pats, **opts): |
|
212 | 212 | if opts.get('sparse'): |
|
213 | 213 | dirs = set() |
|
214 | 214 | for pat in pats: |
|
215 | 215 | dirname, basename = util.split(pat) |
|
216 | 216 | dirs.add(dirname) |
|
217 | 217 | sparse.updateconfig(repo, list(dirs), opts, include=True) |
|
218 | 218 | return orig(ui, repo, *pats, **opts) |
|
219 | 219 | |
|
220 | 220 | extensions.wrapcommand(commands.table, b'add', _add) |
|
221 | 221 | |
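# Sketch of the behavior wired up above, with a hypothetical path: running
#
#   hg add -s backend/api/new.py
#
# first appends an include rule for the directory ``backend/api`` to the
# sparse config, then performs the normal ``hg add``.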
|
222 | 222 | |
|
223 | 223 | def _setupdirstate(ui): |
|
224 | 224 | """Modify the dirstate to prevent stat'ing excluded files, |
|
225 | 225 | and to prevent modifications to files outside the checkout. |
|
226 | 226 | """ |
|
227 | 227 | |
|
228 | 228 | def walk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
229 | 229 | # hack to not exclude explicitly-specified paths so that they can |
|
230 | 230 | # be warned about later, e.g. in dirstate.add()
|
231 | 231 | em = matchmod.exact(match.files()) |
|
232 | 232 | sm = matchmod.unionmatcher([self._sparsematcher, em]) |
|
233 | 233 | match = matchmod.intersectmatchers(match, sm) |
|
234 | 234 | return orig(self, match, subrepos, unknown, ignored, full) |
|
235 | 235 | |
|
236 | 236 | extensions.wrapfunction(dirstate.dirstate, b'walk', walk) |
|
237 | 237 | |
|
238 | 238 | # dirstate.rebuild should not add non-matching files |
|
239 | 239 | def _rebuild(orig, self, parent, allfiles, changedfiles=None): |
|
240 | 240 | matcher = self._sparsematcher |
|
241 | 241 | if not matcher.always(): |
|
242 | 242 | allfiles = [f for f in allfiles if matcher(f)] |
|
243 | 243 | if changedfiles: |
|
244 | 244 | changedfiles = [f for f in changedfiles if matcher(f)] |
|
245 | 245 | |
|
246 | 246 | if changedfiles is not None: |
|
247 | 247 | # In _rebuild, these files will be deleted from the dirstate |
|
248 | 248 | # when they are not found to be in allfiles |
|
249 | 249 | dirstatefilestoremove = {f for f in self if not matcher(f)} |
|
250 | 250 | changedfiles = dirstatefilestoremove.union(changedfiles) |
|
251 | 251 | |
|
252 | 252 | return orig(self, parent, allfiles, changedfiles) |
|
253 | 253 | |
|
254 | 254 | extensions.wrapfunction(dirstate.dirstate, b'rebuild', _rebuild) |
|
255 | 255 | |
|
256 | 256 | # Prevent adding files that are outside the sparse checkout |
|
257 | 257 | editfuncs = [ |
|
258 | 258 | b'normal', |
|
259 | 259 | b'add', |
|
260 | 260 | b'normallookup', |
|
261 | 261 | b'copy', |
|
262 | 262 | b'remove', |
|
263 | 263 | b'merge', |
|
264 | 264 | ] |
|
265 | 265 | hint = _( |
|
266 | 266 | b'include file with `hg debugsparse --include <pattern>` or use ' |
|
267 | 267 | + b'`hg add -s <file>` to include file directory while adding' |
|
268 | 268 | ) |
|
269 | 269 | for func in editfuncs: |
|
270 | 270 | |
|
271 | 271 | def _wrapper(orig, self, *args, **kwargs): |
|
272 | 272 | sparsematch = self._sparsematcher |
|
273 | 273 | if not sparsematch.always(): |
|
274 | 274 | for f in args: |
|
275 | 275 | if f is not None and not sparsematch(f) and f not in self: |
|
276 | 276 | raise error.Abort( |
|
277 | 277 | _( |
|
278 | 278 | b"cannot add '%s' - it is outside " |
|
279 | 279 | b"the sparse checkout" |
|
280 | 280 | ) |
|
281 | 281 | % f, |
|
282 | 282 | hint=hint, |
|
283 | 283 | ) |
|
284 | 284 | return orig(self, *args, **kwargs) |
|
285 | 285 | |
|
286 | 286 | extensions.wrapfunction(dirstate.dirstate, func, _wrapper) |
|
287 | 287 | |
|
288 | 288 | |
|
289 | 289 | @command( |
|
290 | 290 | b'debugsparse', |
|
291 | 291 | [ |
|
292 | 292 | (b'I', b'include', False, _(b'include files in the sparse checkout')), |
|
293 | 293 | (b'X', b'exclude', False, _(b'exclude files in the sparse checkout')), |
|
294 | 294 | (b'd', b'delete', False, _(b'delete an include/exclude rule')), |
|
295 | 295 | ( |
|
296 | 296 | b'f', |
|
297 | 297 | b'force', |
|
298 | 298 | False, |
|
299 | 299 | _(b'allow changing rules even with pending changes'), |
|
300 | 300 | ), |
|
301 | 301 | (b'', b'enable-profile', False, _(b'enables the specified profile')), |
|
302 | 302 | (b'', b'disable-profile', False, _(b'disables the specified profile')), |
|
303 | 303 | (b'', b'import-rules', False, _(b'imports rules from a file')), |
|
304 | 304 | (b'', b'clear-rules', False, _(b'clears local include/exclude rules')), |
|
305 | 305 | ( |
|
306 | 306 | b'', |
|
307 | 307 | b'refresh', |
|
308 | 308 | False, |
|
_(b'updates the working directory after sparseness changes'),
|
310 | 310 | ), |
|
311 | 311 | (b'', b'reset', False, _(b'makes the repo full again')), |
|
312 | 312 | ] |
|
313 | 313 | + commands.templateopts, |
|
314 | 314 | _(b'[--OPTION] PATTERN...'), |
|
315 | 315 | helpbasic=True, |
|
316 | 316 | ) |
|
317 | 317 | def debugsparse(ui, repo, *pats, **opts): |
|
318 | 318 | """make the current checkout sparse, or edit the existing checkout |
|
319 | 319 | |
|
320 | 320 | The sparse command is used to make the current checkout sparse. |
|
321 | 321 | This means files that don't meet the sparse condition will not be |
|
322 | 322 | written to disk, or show up in any working copy operations. It does |
|
323 | 323 | not affect files in history in any way. |
|
324 | 324 | |
|
325 | 325 | Passing no arguments prints the currently applied sparse rules. |
|
326 | 326 | |
|
327 | 327 | --include and --exclude are used to add and remove files from the sparse |
|
328 | 328 | checkout. The effects of adding an include or exclude rule are applied |
|
329 | 329 | immediately. If applying the new rule would cause a file with pending |
|
330 | 330 | changes to be added or removed, the command will fail. Pass --force to |
|
331 | 331 | force a rule change even with pending changes (the changes on disk will |
|
332 | 332 | be preserved). |
|
333 | 333 | |
|
334 | 334 | --delete removes an existing include/exclude rule. The effects are |
|
335 | 335 | immediate. |
|
336 | 336 | |
|
337 | 337 | --refresh refreshes the files on disk based on the sparse rules. This is |
|
338 | 338 | only necessary if .hg/sparse was changed by hand. |
|
339 | 339 | |
|
340 | 340 | --enable-profile and --disable-profile accept a path to a .hgsparse file. |
|
341 | 341 | This allows defining sparse checkouts and tracking them inside the |
|
342 | 342 | repository. This is useful for defining commonly used sparse checkouts for |
|
343 | 343 | many people to use. As the profile definition changes over time, the sparse |
|
344 | 344 | checkout will automatically be updated appropriately, depending on which |
|
345 | 345 | changeset is checked out. Changes to .hgsparse are not applied until they |
|
346 | 346 | have been committed. |
|
347 | 347 | |
|
348 | 348 | --import-rules accepts a path to a file containing rules in the .hgsparse |
|
349 | 349 | format, allowing you to add --include, --exclude and --enable-profile rules |
|
350 | 350 | in bulk. Like the --include, --exclude and --enable-profile switches, the |
|
351 | 351 | changes are applied immediately. |
|
352 | 352 | |
|
353 | 353 | --clear-rules removes all local include and exclude rules, while leaving |
|
354 | 354 | any enabled profiles in place. |
|
355 | 355 | |
|
356 | 356 | Returns 0 if editing the sparse checkout succeeds. |
|
357 | 357 | """ |
|
358 | 358 | opts = pycompat.byteskwargs(opts) |
|
359 | 359 | include = opts.get(b'include') |
|
360 | 360 | exclude = opts.get(b'exclude') |
|
361 | 361 | force = opts.get(b'force') |
|
362 | 362 | enableprofile = opts.get(b'enable_profile') |
|
363 | 363 | disableprofile = opts.get(b'disable_profile') |
|
364 | 364 | importrules = opts.get(b'import_rules') |
|
365 | 365 | clearrules = opts.get(b'clear_rules') |
|
366 | 366 | delete = opts.get(b'delete') |
|
367 | 367 | refresh = opts.get(b'refresh') |
|
368 | 368 | reset = opts.get(b'reset') |
|
369 | 369 | count = sum( |
|
370 | 370 | [ |
|
371 | 371 | include, |
|
372 | 372 | exclude, |
|
373 | 373 | enableprofile, |
|
374 | 374 | disableprofile, |
|
375 | 375 | delete, |
|
376 | 376 | importrules, |
|
377 | 377 | refresh, |
|
378 | 378 | clearrules, |
|
379 | 379 | reset, |
|
380 | 380 | ] |
|
381 | 381 | ) |
|
382 | 382 | if count > 1: |
|
383 | 383 | raise error.Abort(_(b"too many flags specified")) |
|
384 | 384 | |
|
385 | 385 | if count == 0: |
|
386 | 386 | if repo.vfs.exists(b'sparse'): |
|
387 | 387 | ui.status(repo.vfs.read(b"sparse") + b"\n") |
|
388 | 388 | temporaryincludes = sparse.readtemporaryincludes(repo) |
|
389 | 389 | if temporaryincludes: |
|
390 | 390 | ui.status( |
|
391 | 391 | _(b"Temporarily Included Files (for merge/rebase):\n") |
|
392 | 392 | ) |
|
393 | 393 | ui.status((b"\n".join(temporaryincludes) + b"\n")) |
|
394 | 394 | return |
|
395 | 395 | else: |
|
396 | 396 | raise error.Abort( |
|
397 | 397 | _( |
|
398 | 398 | b'the debugsparse command is only supported on' |
|
399 | 399 | b' sparse repositories' |
|
400 | 400 | ) |
|
401 | 401 | ) |
|
402 | 402 | |
|
403 | 403 | if include or exclude or delete or reset or enableprofile or disableprofile: |
|
404 | 404 | sparse.updateconfig( |
|
405 | 405 | repo, |
|
406 | 406 | pats, |
|
407 | 407 | opts, |
|
408 | 408 | include=include, |
|
409 | 409 | exclude=exclude, |
|
410 | 410 | reset=reset, |
|
411 | 411 | delete=delete, |
|
412 | 412 | enableprofile=enableprofile, |
|
413 | 413 | disableprofile=disableprofile, |
|
414 | 414 | force=force, |
|
415 | 415 | ) |
|
416 | 416 | |
|
417 | 417 | if importrules: |
|
418 | 418 | sparse.importfromfiles(repo, opts, pats, force=force) |
|
419 | 419 | |
|
420 | 420 | if clearrules: |
|
421 | 421 | sparse.clearrules(repo, force=force) |
|
422 | 422 | |
|
423 | 423 | if refresh: |
|
424 | 424 | try: |
|
425 | 425 | wlock = repo.wlock() |
|
426 | 426 | fcounts = map( |
|
427 | 427 | len, |
|
428 | 428 | sparse.refreshwdir( |
|
429 | 429 | repo, repo.status(), sparse.matcher(repo), force=force |
|
430 | 430 | ), |
|
431 | 431 | ) |
|
432 | 432 | sparse.printchanges( |
|
433 | 433 | ui, |
|
434 | 434 | opts, |
|
435 | 435 | added=fcounts[0], |
|
436 | 436 | dropped=fcounts[1], |
|
437 | 437 | conflicting=fcounts[2], |
|
438 | 438 | ) |
|
439 | 439 | finally: |
|
440 | 440 | wlock.release() |
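Editor's note: ``debugsparse`` enforces mutually exclusive modes by summing its boolean flags: more than one is an error, and zero falls through to the read-only listing. The idiom in isolation (flag names arbitrary)::

    def dispatch(include=False, exclude=False, reset=False):
        count = sum([include, exclude, reset])  # bools coerce to 0/1
        if count > 1:
            raise SystemExit("too many flags specified")
        if count == 0:
            return "list current rules"
        return "apply the one selected mode"

    print(dispatch())              # list current rules
    print(dispatch(include=True))  # apply the one selected mode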
@@ -1,1478 +1,1478 b'' | |||
|
1 | 1 | # hg.py - repository classes for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import errno |
|
12 | 12 | import os |
|
13 | 13 | import shutil |
|
14 | 14 | import stat |
|
15 | 15 | |
|
16 | 16 | from .i18n import _ |
|
17 | 17 | from .node import nullid |
|
18 | 18 | from .pycompat import getattr |
|
19 | 19 | |
|
20 | 20 | from . import ( |
|
21 | 21 | bookmarks, |
|
22 | 22 | bundlerepo, |
|
23 | 23 | cacheutil, |
|
24 | 24 | cmdutil, |
|
25 | 25 | destutil, |
|
26 | 26 | discovery, |
|
27 | 27 | error, |
|
28 | 28 | exchange, |
|
29 | 29 | extensions, |
|
30 | 30 | httppeer, |
|
31 | 31 | localrepo, |
|
32 | 32 | lock, |
|
33 | 33 | logcmdutil, |
|
34 | 34 | logexchange, |
|
35 | 35 | merge as mergemod, |
|
36 | 36 | mergestate as mergestatemod, |
|
37 | 37 | narrowspec, |
|
38 | 38 | node, |
|
39 | 39 | phases, |
|
40 | 40 | pycompat, |
|
41 | 41 | requirements, |
|
42 | 42 | scmutil, |
|
43 | 43 | sshpeer, |
|
44 | 44 | statichttprepo, |
|
45 | 45 | ui as uimod, |
|
46 | 46 | unionrepo, |
|
47 | 47 | url, |
|
48 | 48 | util, |
|
49 | 49 | verify as verifymod, |
|
50 | 50 | vfs as vfsmod, |
|
51 | 51 | ) |
|
52 | 52 | from .utils import hashutil |
|
53 | 53 | |
|
54 | 54 | release = lock.release |
|
55 | 55 | |
|
56 | 56 | # shared features |
|
57 | 57 | sharedbookmarks = b'bookmarks' |
|
58 | 58 | |
|
59 | 59 | |
|
60 | 60 | def _local(path): |
|
61 | 61 | path = util.expandpath(util.urllocalpath(path)) |
|
62 | 62 | |
|
63 | 63 | try: |
|
64 | 64 | # we use os.stat() directly here instead of os.path.isfile() |
|
65 | 65 | # because the latter started returning `False` on invalid path |
|
66 | 66 | # exceptions starting in 3.8 and we care about handling |
|
67 | 67 | # invalid paths specially here. |
|
68 | 68 | st = os.stat(path) |
|
69 | 69 | isfile = stat.S_ISREG(st.st_mode) |
|
70 | 70 | # Python 2 raises TypeError, Python 3 ValueError. |
|
71 | 71 | except (TypeError, ValueError) as e: |
|
72 | 72 | raise error.Abort( |
|
73 | 73 | _(b'invalid path %s: %s') % (path, pycompat.bytestr(e)) |
|
74 | 74 | ) |
|
75 | 75 | except OSError: |
|
76 | 76 | isfile = False |
|
77 | 77 | |
|
78 | 78 | return isfile and bundlerepo or localrepo |
|
79 | 79 | |
|
80 | 80 | |
|
81 | 81 | def addbranchrevs(lrepo, other, branches, revs): |
|
82 | 82 | peer = other.peer() # a courtesy to callers using a localrepo for other |
|
83 | 83 | hashbranch, branches = branches |
|
84 | 84 | if not hashbranch and not branches: |
|
85 | 85 | x = revs or None |
|
86 | 86 | if revs: |
|
87 | 87 | y = revs[0] |
|
88 | 88 | else: |
|
89 | 89 | y = None |
|
90 | 90 | return x, y |
|
91 | 91 | if revs: |
|
92 | 92 | revs = list(revs) |
|
93 | 93 | else: |
|
94 | 94 | revs = [] |
|
95 | 95 | |
|
96 | 96 | if not peer.capable(b'branchmap'): |
|
97 | 97 | if branches: |
|
98 | 98 | raise error.Abort(_(b"remote branch lookup not supported")) |
|
99 | 99 | revs.append(hashbranch) |
|
100 | 100 | return revs, revs[0] |
|
101 | 101 | |
|
102 | 102 | with peer.commandexecutor() as e: |
|
103 | 103 | branchmap = e.callcommand(b'branchmap', {}).result() |
|
104 | 104 | |
|
105 | 105 | def primary(branch): |
|
106 | 106 | if branch == b'.': |
|
107 | 107 | if not lrepo: |
|
108 | 108 | raise error.Abort(_(b"dirstate branch not accessible")) |
|
109 | 109 | branch = lrepo.dirstate.branch() |
|
110 | 110 | if branch in branchmap: |
|
111 | 111 | revs.extend(node.hex(r) for r in reversed(branchmap[branch])) |
|
112 | 112 | return True |
|
113 | 113 | else: |
|
114 | 114 | return False |
|
115 | 115 | |
|
116 | 116 | for branch in branches: |
|
117 | 117 | if not primary(branch): |
|
118 | 118 | raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) |
|
119 | 119 | if hashbranch: |
|
120 | 120 | if not primary(hashbranch): |
|
121 | 121 | revs.append(hashbranch) |
|
122 | 122 | return revs, revs[0] |
|
123 | 123 | |
|
124 | 124 | |
|
125 | 125 | def parseurl(path, branches=None): |
|
126 | 126 | '''parse url#branch, returning (url, (branch, branches))''' |
|
127 | 127 | |
|
128 | 128 | u = util.url(path) |
|
129 | 129 | branch = None |
|
130 | 130 | if u.fragment: |
|
131 | 131 | branch = u.fragment |
|
132 | 132 | u.fragment = None |
|
133 | 133 | return bytes(u), (branch, branches or []) |
|
134 | 134 | |
|
135 | 135 | |
|
136 | 136 | schemes = { |
|
137 | 137 | b'bundle': bundlerepo, |
|
138 | 138 | b'union': unionrepo, |
|
139 | 139 | b'file': _local, |
|
140 | 140 | b'http': httppeer, |
|
141 | 141 | b'https': httppeer, |
|
142 | 142 | b'ssh': sshpeer, |
|
143 | 143 | b'static-http': statichttprepo, |
|
144 | 144 | } |
|
145 | 145 | |
|
146 | 146 | |
|
147 | 147 | def _peerlookup(path): |
|
148 | 148 | u = util.url(path) |
|
149 | 149 | scheme = u.scheme or b'file' |
|
150 | 150 | thing = schemes.get(scheme) or schemes[b'file'] |
|
151 | 151 | try: |
|
152 | 152 | return thing(path) |
|
153 | 153 | except TypeError: |
|
154 | 154 | # we can't test callable(thing) because 'thing' can be an unloaded |
|
155 | 155 | # module that implements __call__ |
|
156 | 156 | if not util.safehasattr(thing, b'instance'): |
|
157 | 157 | raise |
|
158 | 158 | return thing |
|
159 | 159 | |
|
160 | 160 | |
|
161 | 161 | def islocal(repo): |
|
162 | 162 | '''return true if repo (or path pointing to repo) is local''' |
|
163 | 163 | if isinstance(repo, bytes): |
|
164 | 164 | try: |
|
165 | 165 | return _peerlookup(repo).islocal(repo) |
|
166 | 166 | except AttributeError: |
|
167 | 167 | return False |
|
168 | 168 | return repo.local() |
|
169 | 169 | |
|
170 | 170 | |
|
171 | 171 | def openpath(ui, path, sendaccept=True): |
|
172 | 172 | '''open path with open if local, url.open if remote''' |
|
173 | 173 | pathurl = util.url(path, parsequery=False, parsefragment=False) |
|
174 | 174 | if pathurl.islocal(): |
|
175 | 175 | return util.posixfile(pathurl.localpath(), b'rb') |
|
176 | 176 | else: |
|
177 | 177 | return url.open(ui, path, sendaccept=sendaccept) |
|
178 | 178 | |
|
179 | 179 | |
|
180 | 180 | # a list of (ui, repo) functions called for wire peer initialization |
|
181 | 181 | wirepeersetupfuncs = [] |
|
182 | 182 | |
|
183 | 183 | |
|
184 | 184 | def _peerorrepo( |
|
185 | 185 | ui, path, create=False, presetupfuncs=None, intents=None, createopts=None |
|
186 | 186 | ): |
|
187 | 187 | """return a repository object for the specified path""" |
|
188 | 188 | obj = _peerlookup(path).instance( |
|
189 | 189 | ui, path, create, intents=intents, createopts=createopts |
|
190 | 190 | ) |
|
191 | 191 | ui = getattr(obj, "ui", ui) |
|
192 | 192 | for f in presetupfuncs or []: |
|
193 | 193 | f(ui, obj) |
|
194 | 194 | ui.log(b'extension', b'- executing reposetup hooks\n') |
|
195 | 195 | with util.timedcm('all reposetup') as allreposetupstats: |
|
196 | 196 | for name, module in extensions.extensions(ui): |
|
197 | 197 | ui.log(b'extension', b' - running reposetup for %s\n', name) |
|
198 | 198 | hook = getattr(module, 'reposetup', None) |
|
199 | 199 | if hook: |
|
200 | 200 | with util.timedcm('reposetup %r', name) as stats: |
|
201 | 201 | hook(ui, obj) |
|
202 | 202 | ui.log( |
|
203 | 203 | b'extension', b' > reposetup for %s took %s\n', name, stats |
|
204 | 204 | ) |
|
205 | 205 | ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats) |
|
206 | 206 | if not obj.local(): |
|
207 | 207 | for f in wirepeersetupfuncs: |
|
208 | 208 | f(ui, obj) |
|
209 | 209 | return obj |
|
210 | 210 | |
|
211 | 211 | |
|
212 | 212 | def repository( |
|
213 | 213 | ui, |
|
214 | 214 | path=b'', |
|
215 | 215 | create=False, |
|
216 | 216 | presetupfuncs=None, |
|
217 | 217 | intents=None, |
|
218 | 218 | createopts=None, |
|
219 | 219 | ): |
|
220 | 220 | """return a repository object for the specified path""" |
|
221 | 221 | peer = _peerorrepo( |
|
222 | 222 | ui, |
|
223 | 223 | path, |
|
224 | 224 | create, |
|
225 | 225 | presetupfuncs=presetupfuncs, |
|
226 | 226 | intents=intents, |
|
227 | 227 | createopts=createopts, |
|
228 | 228 | ) |
|
229 | 229 | repo = peer.local() |
|
230 | 230 | if not repo: |
|
231 | 231 | raise error.Abort( |
|
232 | 232 | _(b"repository '%s' is not local") % (path or peer.url()) |
|
233 | 233 | ) |
|
234 | 234 | return repo.filtered(b'visible') |
|
235 | 235 | |
|
236 | 236 | |
|
237 | 237 | def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None): |
|
238 | 238 | '''return a repository peer for the specified path''' |
|
239 | 239 | rui = remoteui(uiorrepo, opts) |
|
240 | 240 | return _peerorrepo( |
|
241 | 241 | rui, path, create, intents=intents, createopts=createopts |
|
242 | 242 | ).peer() |
|
243 | 243 | |
|
244 | 244 | |
|
245 | 245 | def defaultdest(source): |
|
246 | 246 | '''return default destination of clone if none is given |
|
247 | 247 | |
|
248 | 248 | >>> defaultdest(b'foo') |
|
249 | 249 | 'foo' |
|
250 | 250 | >>> defaultdest(b'/foo/bar') |
|
251 | 251 | 'bar' |
|
252 | 252 | >>> defaultdest(b'/') |
|
253 | 253 | '' |
|
254 | 254 | >>> defaultdest(b'') |
|
255 | 255 | '' |
|
256 | 256 | >>> defaultdest(b'http://example.org/') |
|
257 | 257 | '' |
|
258 | 258 | >>> defaultdest(b'http://example.org/foo/') |
|
259 | 259 | 'foo' |
|
260 | 260 | ''' |
|
261 | 261 | path = util.url(source).path |
|
262 | 262 | if not path: |
|
263 | 263 | return b'' |
|
264 | 264 | return os.path.basename(os.path.normpath(path)) |
|
265 | 265 | |
|
266 | 266 | |
|
267 | 267 | def sharedreposource(repo): |
|
268 | 268 | """Returns repository object for source repository of a shared repo. |
|
269 | 269 | |
|
270 | 270 | If repo is not a shared repository, returns None. |
|
271 | 271 | """ |
|
272 | 272 | if repo.sharedpath == repo.path: |
|
273 | 273 | return None |
|
274 | 274 | |
|
275 | 275 | if util.safehasattr(repo, b'srcrepo') and repo.srcrepo: |
|
276 | 276 | return repo.srcrepo |
|
277 | 277 | |
|
278 | 278 | # the sharedpath always ends in the .hg; we want the path to the repo |
|
279 | 279 | source = repo.vfs.split(repo.sharedpath)[0] |
|
280 | 280 | srcurl, branches = parseurl(source) |
|
281 | 281 | srcrepo = repository(repo.ui, srcurl) |
|
282 | 282 | repo.srcrepo = srcrepo |
|
283 | 283 | return srcrepo |
|
284 | 284 | |
|
285 | 285 | |
|
286 | 286 | def share( |
|
287 | 287 | ui, |
|
288 | 288 | source, |
|
289 | 289 | dest=None, |
|
290 | 290 | update=True, |
|
291 | 291 | bookmarks=True, |
|
292 | 292 | defaultpath=None, |
|
293 | 293 | relative=False, |
|
294 | 294 | ): |
|
295 | 295 | '''create a shared repository''' |
|
296 | 296 | |
|
297 | 297 | if not islocal(source): |
|
298 | 298 | raise error.Abort(_(b'can only share local repositories')) |
|
299 | 299 | |
|
300 | 300 | if not dest: |
|
301 | 301 | dest = defaultdest(source) |
|
302 | 302 | else: |
|
303 | 303 | dest = ui.expandpath(dest) |
|
304 | 304 | |
|
305 | 305 | if isinstance(source, bytes): |
|
306 | 306 | origsource = ui.expandpath(source) |
|
307 | 307 | source, branches = parseurl(origsource) |
|
308 | 308 | srcrepo = repository(ui, source) |
|
309 | 309 | rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) |
|
310 | 310 | else: |
|
311 | 311 | srcrepo = source.local() |
|
312 | 312 | checkout = None |
|
313 | 313 | |
|
314 | 314 | shareditems = set() |
|
315 | 315 | if bookmarks: |
|
316 | 316 | shareditems.add(sharedbookmarks) |
|
317 | 317 | |
|
318 | 318 | r = repository( |
|
319 | 319 | ui, |
|
320 | 320 | dest, |
|
321 | 321 | create=True, |
|
322 | 322 | createopts={ |
|
323 | 323 | b'sharedrepo': srcrepo, |
|
324 | 324 | b'sharedrelative': relative, |
|
325 | 325 | b'shareditems': shareditems, |
|
326 | 326 | }, |
|
327 | 327 | ) |
|
328 | 328 | |
|
329 | 329 | postshare(srcrepo, r, defaultpath=defaultpath) |
|
330 | 330 | r = repository(ui, dest) |
|
331 | 331 | _postshareupdate(r, update, checkout=checkout) |
|
332 | 332 | return r |
|
333 | 333 | |
|
334 | 334 | |
|
335 | 335 | def _prependsourcehgrc(repo): |
|
336 | 336 | """ copies the source repo config and prepend it in current repo .hg/hgrc |
|
337 | 337 | on unshare. This is only done if the share was perfomed using share safe |
|
338 | 338 | method where we share config of source in shares""" |
|
339 | 339 | srcvfs = vfsmod.vfs(repo.sharedpath) |
|
340 | 340 | dstvfs = vfsmod.vfs(repo.path) |
|
341 | 341 | |
|
342 | 342 | if not srcvfs.exists(b'hgrc'): |
|
343 | 343 | return |
|
344 | 344 | |
|
345 | 345 | currentconfig = b'' |
|
346 | 346 | if dstvfs.exists(b'hgrc'): |
|
347 | 347 | currentconfig = dstvfs.read(b'hgrc') |
|
348 | 348 | |
|
349 | 349 | with dstvfs(b'hgrc', b'wb') as fp: |
|
350 | 350 | sourceconfig = srcvfs.read(b'hgrc') |
|
351 | 351 | fp.write(b"# Config copied from shared source\n") |
|
352 | 352 | fp.write(sourceconfig) |
|
353 | 353 | fp.write(b'\n') |
|
354 | 354 | fp.write(currentconfig) |
|
355 | 355 | |
|
356 | 356 | |
|
357 | 357 | def unshare(ui, repo): |
|
358 | 358 | """convert a shared repository to a normal one |
|
359 | 359 | |
|
360 | 360 | Copy the store data to the repo and remove the sharedpath data. |
|
361 | 361 | |
|
362 | 362 | Returns a new repository object representing the unshared repository. |
|
363 | 363 | |
|
364 | 364 | The passed repository object is not usable after this function is |
|
365 | 365 | called. |
|
366 | 366 | """ |
|
367 | 367 | |
|
368 | 368 | with repo.lock(): |
|
369 | 369 | # we use locks here because if we race with commit, we |
|
370 | 370 | # can end up with extra data in the cloned revlogs that's |
|
371 | 371 | # not pointed to by changesets, thus causing verify to |
|
372 | 372 | # fail |
|
373 | 373 | destlock = copystore(ui, repo, repo.path) |
|
374 | 374 | with destlock or util.nullcontextmanager(): |
|
375 | 375 | if requirements.SHARESAFE_REQUIREMENT in repo.requirements: |
|
376 | 376 | # we were sharing .hg/hgrc of the share source with the current |
|
377 | 377 | # repo. We need to copy that while unsharing otherwise it can |
|
378 | 378 | # disable hooks and other checks |
|
379 | 379 | _prependsourcehgrc(repo) |
|
380 | 380 | |
|
381 | 381 | sharefile = repo.vfs.join(b'sharedpath') |
|
382 | 382 | util.rename(sharefile, sharefile + b'.old') |
|
383 | 383 | |
|
384 | 384 | repo.requirements.discard(requirements.SHARED_REQUIREMENT) |
|
385 | 385 | repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT) |
|
386 | 386 | scmutil.writereporequirements(repo) |
|
387 | 387 | |
|
388 | 388 | # Removing share changes some fundamental properties of the repo instance. |
|
389 | 389 | # So we instantiate a new repo object and operate on it rather than |
|
390 | 390 | # try to keep the existing repo usable. |
|
391 | 391 | newrepo = repository(repo.baseui, repo.root, create=False) |
|
392 | 392 | |
|
393 | 393 | # TODO: figure out how to access subrepos that exist, but were previously |
|
394 | 394 | # removed from .hgsub |
|
395 | 395 | c = newrepo[b'.'] |
|
396 | 396 | subs = c.substate |
|
397 | 397 | for s in sorted(subs): |
|
398 | 398 | c.sub(s).unshare() |
|
399 | 399 | |
|
400 | 400 | localrepo.poisonrepository(repo) |
|
401 | 401 | |
|
402 | 402 | return newrepo |
|
403 | 403 | |
|
404 | 404 | |
|
405 | 405 | def postshare(sourcerepo, destrepo, defaultpath=None): |
|
406 | 406 | """Called after a new shared repo is created. |
|
407 | 407 | |
|
408 | 408 | The new repo only has a requirements file and pointer to the source. |
|
409 | 409 | This function configures additional shared data. |
|
410 | 410 | |
|
411 | 411 | Extensions can wrap this function and write additional entries to |
|
412 | 412 | destrepo/.hg/shared to indicate additional pieces of data to be shared. |
|
413 | 413 | """ |
|
414 | 414 | default = defaultpath or sourcerepo.ui.config(b'paths', b'default') |
|
415 | 415 | if default: |
|
416 | 416 | template = b'[paths]\ndefault = %s\n' |
|
417 | 417 | destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default)) |
|
418 | 418 | if requirements.NARROW_REQUIREMENT in sourcerepo.requirements: |
|
419 | 419 | with destrepo.wlock(): |
|
420 | 420 | narrowspec.copytoworkingcopy(destrepo) |
|
421 | 421 | |
|
422 | 422 | |
|
423 | 423 | def _postshareupdate(repo, update, checkout=None): |
|
424 | 424 | """Maybe perform a working directory update after a shared repo is created. |
|
425 | 425 | |
|
426 | 426 | ``update`` can be a boolean or a revision to update to. |
|
427 | 427 | """ |
|
428 | 428 | if not update: |
|
429 | 429 | return |
|
430 | 430 | |
|
431 | 431 | repo.ui.status(_(b"updating working directory\n")) |
|
432 | 432 | if update is not True: |
|
433 | 433 | checkout = update |
|
434 | 434 | for test in (checkout, b'default', b'tip'): |
|
435 | 435 | if test is None: |
|
436 | 436 | continue |
|
437 | 437 | try: |
|
438 | 438 | uprev = repo.lookup(test) |
|
439 | 439 | break |
|
440 | 440 | except error.RepoLookupError: |
|
441 | 441 | continue |
|
442 | 442 | _update(repo, uprev) |
|
443 | 443 | |
|
444 | 444 | |
|
445 | 445 | def copystore(ui, srcrepo, destpath): |
|
446 | 446 | '''copy files from store of srcrepo in destpath |
|
447 | 447 | |
|
448 | 448 | returns destlock |
|
449 | 449 | ''' |
|
450 | 450 | destlock = None |
|
451 | 451 | try: |
|
452 | 452 | hardlink = None |
|
453 | 453 | topic = _(b'linking') if hardlink else _(b'copying') |
|
454 | 454 | with ui.makeprogress(topic, unit=_(b'files')) as progress: |
|
455 | 455 | num = 0 |
|
456 | 456 | srcpublishing = srcrepo.publishing() |
|
457 | 457 | srcvfs = vfsmod.vfs(srcrepo.sharedpath) |
|
458 | 458 | dstvfs = vfsmod.vfs(destpath) |
|
459 | 459 | for f in srcrepo.store.copylist(): |
|
460 | 460 | if srcpublishing and f.endswith(b'phaseroots'): |
|
461 | 461 | continue |
|
462 | 462 | dstbase = os.path.dirname(f) |
|
463 | 463 | if dstbase and not dstvfs.exists(dstbase): |
|
464 | 464 | dstvfs.mkdir(dstbase) |
|
465 | 465 | if srcvfs.exists(f): |
|
466 | 466 | if f.endswith(b'data'): |
|
467 | 467 | # 'dstbase' may be empty (e.g. revlog format 0) |
|
468 | 468 | lockfile = os.path.join(dstbase, b"lock") |
|
469 | 469 | # lock to avoid premature writing to the target |
|
470 | 470 | destlock = lock.lock(dstvfs, lockfile) |
|
471 | 471 | hardlink, n = util.copyfiles( |
|
472 | 472 | srcvfs.join(f), dstvfs.join(f), hardlink, progress |
|
473 | 473 | ) |
|
474 | 474 | num += n |
|
475 | 475 | if hardlink: |
|
476 | 476 | ui.debug(b"linked %d files\n" % num) |
|
477 | 477 | else: |
|
478 | 478 | ui.debug(b"copied %d files\n" % num) |
|
479 | 479 | return destlock |
|
480 | 480 | except: # re-raises |
|
481 | 481 | release(destlock) |
|
482 | 482 | raise |
|
483 | 483 | |
|
484 | 484 | |
|
485 | 485 | def clonewithshare( |
|
486 | 486 | ui, |
|
487 | 487 | peeropts, |
|
488 | 488 | sharepath, |
|
489 | 489 | source, |
|
490 | 490 | srcpeer, |
|
491 | 491 | dest, |
|
492 | 492 | pull=False, |
|
493 | 493 | rev=None, |
|
494 | 494 | update=True, |
|
495 | 495 | stream=False, |
|
496 | 496 | ): |
|
497 | 497 | """Perform a clone using a shared repo. |
|
498 | 498 | |
|
499 | 499 | The store for the repository will be located at <sharepath>/.hg. The |
|
500 | 500 | specified revisions will be cloned or pulled from "source". A shared repo |
|
501 | 501 | will be created at "dest" and a working copy will be created if "update" is |
|
502 | 502 | True. |
|
503 | 503 | """ |
|
504 | 504 | revs = None |
|
505 | 505 | if rev: |
|
506 | 506 | if not srcpeer.capable(b'lookup'): |
|
507 | 507 | raise error.Abort( |
|
508 | 508 | _( |
|
509 | 509 | b"src repository does not support " |
|
510 | 510 | b"revision lookup and so doesn't " |
|
511 | 511 | b"support clone by revision" |
|
512 | 512 | ) |
|
513 | 513 | ) |
|
514 | 514 | |
|
515 | 515 | # TODO this is batchable. |
|
516 | 516 | remoterevs = [] |
|
517 | 517 | for r in rev: |
|
518 | 518 | with srcpeer.commandexecutor() as e: |
|
519 | 519 | remoterevs.append( |
|
520 | 520 | e.callcommand(b'lookup', {b'key': r,}).result() |
|
521 | 521 | ) |
|
522 | 522 | revs = remoterevs |
|
523 | 523 | |
|
524 | 524 | # Obtain a lock before checking for or cloning the pooled repo otherwise |
|
525 | 525 | # 2 clients may race creating or populating it. |
|
526 | 526 | pooldir = os.path.dirname(sharepath) |
|
527 | 527 | # lock class requires the directory to exist. |
|
528 | 528 | try: |
|
529 | 529 | util.makedir(pooldir, False) |
|
530 | 530 | except OSError as e: |
|
531 | 531 | if e.errno != errno.EEXIST: |
|
532 | 532 | raise |
|
533 | 533 | |
|
534 | 534 | poolvfs = vfsmod.vfs(pooldir) |
|
535 | 535 | basename = os.path.basename(sharepath) |
|
536 | 536 | |
|
537 | 537 | with lock.lock(poolvfs, b'%s.lock' % basename): |
|
538 | 538 | if os.path.exists(sharepath): |
|
539 | 539 | ui.status( |
|
540 | 540 | _(b'(sharing from existing pooled repository %s)\n') % basename |
|
541 | 541 | ) |
|
542 | 542 | else: |
|
543 | 543 | ui.status( |
|
544 | 544 | _(b'(sharing from new pooled repository %s)\n') % basename |
|
545 | 545 | ) |
|
546 | 546 | # Always use pull mode because hardlinks in share mode don't work |
|
547 | 547 | # well. Never update because working copies aren't necessary in |
|
548 | 548 | # share mode. |
|
549 | 549 | clone( |
|
550 | 550 | ui, |
|
551 | 551 | peeropts, |
|
552 | 552 | source, |
|
553 | 553 | dest=sharepath, |
|
554 | 554 | pull=True, |
|
555 | 555 | revs=rev, |
|
556 | 556 | update=False, |
|
557 | 557 | stream=stream, |
|
558 | 558 | ) |
|
559 | 559 | |
|
560 | 560 | # Resolve the value to put in [paths] section for the source. |
|
561 | 561 | if islocal(source): |
|
562 | 562 | defaultpath = os.path.abspath(util.urllocalpath(source)) |
|
563 | 563 | else: |
|
564 | 564 | defaultpath = source |
|
565 | 565 | |
|
566 | 566 | sharerepo = repository(ui, path=sharepath) |
|
567 | 567 | destrepo = share( |
|
568 | 568 | ui, |
|
569 | 569 | sharerepo, |
|
570 | 570 | dest=dest, |
|
571 | 571 | update=False, |
|
572 | 572 | bookmarks=False, |
|
573 | 573 | defaultpath=defaultpath, |
|
574 | 574 | ) |
|
575 | 575 | |
|
576 | 576 | # We need to perform a pull against the dest repo to fetch bookmarks |
|
577 | 577 | # and other non-store data that isn't shared by default. In the case of |
|
578 | 578 | # non-existing shared repo, this means we pull from the remote twice. This |
|
579 | 579 | # is a bit weird. But at the time it was implemented, there wasn't an easy |
|
580 | 580 | # way to pull just non-changegroup data. |
|
581 | 581 | exchange.pull(destrepo, srcpeer, heads=revs) |
|
582 | 582 | |
|
583 | 583 | _postshareupdate(destrepo, update) |
|
584 | 584 | |
|
585 | 585 | return srcpeer, peer(ui, peeropts, dest) |
|
586 | 586 | |
|
587 | 587 | |
|
588 | 588 | # Recomputing branch cache might be slow on big repos, |
|
589 | 589 | # so just copy it |
|
590 | 590 | def _copycache(srcrepo, dstcachedir, fname): |
|
591 | 591 | """copy a cache from srcrepo to destcachedir (if it exists)""" |
|
592 | 592 | srcbranchcache = srcrepo.vfs.join(b'cache/%s' % fname) |
|
593 | 593 | dstbranchcache = os.path.join(dstcachedir, fname) |
|
594 | 594 | if os.path.exists(srcbranchcache): |
|
595 | 595 | if not os.path.exists(dstcachedir): |
|
596 | 596 | os.mkdir(dstcachedir) |
|
597 | 597 | util.copyfile(srcbranchcache, dstbranchcache) |
|
598 | 598 | |
|
599 | 599 | |
|
600 | 600 | def clone( |
|
601 | 601 | ui, |
|
602 | 602 | peeropts, |
|
603 | 603 | source, |
|
604 | 604 | dest=None, |
|
605 | 605 | pull=False, |
|
606 | 606 | revs=None, |
|
607 | 607 | update=True, |
|
608 | 608 | stream=False, |
|
609 | 609 | branch=None, |
|
610 | 610 | shareopts=None, |
|
611 | 611 | storeincludepats=None, |
|
612 | 612 | storeexcludepats=None, |
|
613 | 613 | depth=None, |
|
614 | 614 | ): |
|
615 | 615 | """Make a copy of an existing repository. |
|
616 | 616 | |
|
617 | 617 | Create a copy of an existing repository in a new directory. The |
|
618 | 618 | source and destination are URLs, as passed to the repository |
|
619 | 619 | function. Returns a pair of repository peers, the source and |
|
620 | 620 | newly created destination. |
|
621 | 621 | |
|
622 | 622 | The location of the source is added to the new repository's |
|
623 | 623 | .hg/hgrc file, as the default to be used for future pulls and |
|
624 | 624 | pushes. |
|
625 | 625 | |
|
626 | 626 | If an exception is raised, the partly cloned/updated destination |
|
627 | 627 | repository will be deleted. |
|
628 | 628 | |
|
629 | 629 | Arguments: |
|
630 | 630 | |
|
631 | 631 | source: repository object or URL |
|
632 | 632 | |
|
633 | 633 | dest: URL of destination repository to create (defaults to base |
|
634 | 634 | name of source repository) |
|
635 | 635 | |
|
636 | 636 | pull: always pull from source repository, even in local case or if the |
|
637 | 637 | server prefers streaming |
|
638 | 638 | |
|
639 | 639 | stream: stream raw data uncompressed from repository (fast over |
|
640 | 640 | LAN, slow over WAN) |
|
641 | 641 | |
|
642 | 642 | revs: revision to clone up to (implies pull=True) |
|
643 | 643 | |
|
644 | 644 | update: update working directory after clone completes, if |
|
645 | 645 | destination is local repository (True means update to default rev, |
|
646 | 646 | anything else is treated as a revision) |
|
647 | 647 | |
|
648 | 648 | branch: branches to clone |
|
649 | 649 | |
|
650 | 650 | shareopts: dict of options to control auto sharing behavior. The "pool" key |
|
651 | 651 | activates auto sharing mode and defines the directory for stores. The |
|
652 | 652 | "mode" key determines how to construct the directory name of the shared |
|
653 | 653 | repository. "identity" means the name is derived from the node of the first |
|
654 | 654 | changeset in the repository. "remote" means the name is derived from the |
|
655 | 655 | remote's path/URL. Defaults to "identity." |
|
656 | 656 | |
|
657 | 657 | storeincludepats and storeexcludepats: sets of file patterns to include and |
|
658 | 658 | exclude in the repository copy, respectively. If not defined, all files |
|
659 | 659 | will be included (a "full" clone). Otherwise a "narrow" clone containing |
|
660 | 660 | only the requested files will be performed. If ``storeincludepats`` is not |
|
661 | 661 | defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be |
|
662 | 662 | ``path:.``. If both are empty sets, no files will be cloned. |
|
663 | 663 | """ |
|
664 | 664 | |
|
665 | 665 | if isinstance(source, bytes): |
|
666 | 666 | origsource = ui.expandpath(source) |
|
667 | 667 | source, branches = parseurl(origsource, branch) |
|
668 | 668 | srcpeer = peer(ui, peeropts, source) |
|
669 | 669 | else: |
|
670 | 670 | srcpeer = source.peer() # in case we were called with a localrepo |
|
671 | 671 | branches = (None, branch or []) |
|
672 | 672 | origsource = source = srcpeer.url() |
|
673 | 673 | revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) |
|
674 | 674 | |
|
675 | 675 | if dest is None: |
|
676 | 676 | dest = defaultdest(source) |
|
677 | 677 | if dest: |
|
678 | 678 | ui.status(_(b"destination directory: %s\n") % dest) |
|
679 | 679 | else: |
|
680 | 680 | dest = ui.expandpath(dest) |
|
681 | 681 | |
|
682 | 682 | dest = util.urllocalpath(dest) |
|
683 | 683 | source = util.urllocalpath(source) |
|
684 | 684 | |
|
685 | 685 | if not dest: |
|
686 | 686 | raise error.Abort(_(b"empty destination path is not valid")) |
|
687 | 687 | |
|
688 | 688 | destvfs = vfsmod.vfs(dest, expandpath=True) |
|
689 | 689 | if destvfs.lexists(): |
|
690 | 690 | if not destvfs.isdir(): |
|
691 | 691 | raise error.Abort(_(b"destination '%s' already exists") % dest) |
|
692 | 692 | elif destvfs.listdir(): |
|
693 | 693 | raise error.Abort(_(b"destination '%s' is not empty") % dest) |
|
694 | 694 | |
|
695 | 695 | createopts = {} |
|
696 | 696 | narrow = False |
|
697 | 697 | |
|
698 | 698 | if storeincludepats is not None: |
|
699 | 699 | narrowspec.validatepatterns(storeincludepats) |
|
700 | 700 | narrow = True |
|
701 | 701 | |
|
702 | 702 | if storeexcludepats is not None: |
|
703 | 703 | narrowspec.validatepatterns(storeexcludepats) |
|
704 | 704 | narrow = True |
|
705 | 705 | |
|
706 | 706 | if narrow: |
|
707 | 707 | # Include everything by default if only exclusion patterns defined. |
|
708 | 708 | if storeexcludepats and not storeincludepats: |
|
709 | 709 | storeincludepats = {b'path:.'} |
|
710 | 710 | |
|
711 | 711 | createopts[b'narrowfiles'] = True |
|
712 | 712 | |
|
713 | 713 | if depth: |
|
714 | 714 | createopts[b'shallowfilestore'] = True |
|
715 | 715 | |
|
716 | 716 | if srcpeer.capable(b'lfs-serve'): |
|
717 | 717 | # Repository creation honors the config if it disabled the extension, so |
|
718 | 718 | # we can't just announce that lfs will be enabled. This check avoids |
|
719 | 719 | # saying that lfs will be enabled, and then saying it's an unknown |
|
720 | 720 | # feature. The lfs creation option is set in either case so that a |
|
721 | 721 | # requirement is added. If the extension is explicitly disabled but the |
|
722 | 722 | # requirement is set, the clone aborts early, before transferring any |
|
723 | 723 | # data. |
|
724 | 724 | createopts[b'lfs'] = True |
|
725 | 725 | |
|
726 | 726 | if extensions.disabled_help(b'lfs'): |
|
727 | 727 | ui.status( |
|
728 | 728 | _( |
|
729 | 729 | b'(remote is using large file support (lfs), but it is ' |
|
730 | 730 | b'explicitly disabled in the local configuration)\n' |
|
731 | 731 | ) |
|
732 | 732 | ) |
|
733 | 733 | else: |
|
734 | 734 | ui.status( |
|
735 | 735 | _( |
|
736 | 736 | b'(remote is using large file support (lfs); lfs will ' |
|
737 | 737 | b'be enabled for this repository)\n' |
|
738 | 738 | ) |
|
739 | 739 | ) |
|
740 | 740 | |
|
741 | 741 | shareopts = shareopts or {} |
|
742 | 742 | sharepool = shareopts.get(b'pool') |
|
743 | 743 | sharenamemode = shareopts.get(b'mode') |
|
744 | 744 | if sharepool and islocal(dest): |
|
745 | 745 | sharepath = None |
|
746 | 746 | if sharenamemode == b'identity': |
|
747 | 747 | # Resolve the name from the initial changeset in the remote |
|
748 | 748 | # repository. This returns nullid when the remote is empty. It |
|
749 | 749 | # raises RepoLookupError if revision 0 is filtered or otherwise |
|
750 | 750 | # not available. If we fail to resolve, sharing is not enabled. |
|
751 | 751 | try: |
|
752 | 752 | with srcpeer.commandexecutor() as e: |
|
753 | 753 | rootnode = e.callcommand( |
|
754 | 754 | b'lookup', {b'key': b'0',} |
|
755 | 755 | ).result() |
|
756 | 756 | |
|
757 | 757 | if rootnode != node.nullid: |
|
758 | 758 | sharepath = os.path.join(sharepool, node.hex(rootnode)) |
|
759 | 759 | else: |
|
760 | 760 | ui.status( |
|
761 | 761 | _( |
|
762 | 762 | b'(not using pooled storage: ' |
|
763 | 763 | b'remote appears to be empty)\n' |
|
764 | 764 | ) |
|
765 | 765 | ) |
|
766 | 766 | except error.RepoLookupError: |
|
767 | 767 | ui.status( |
|
768 | 768 | _( |
|
769 | 769 | b'(not using pooled storage: ' |
|
770 | 770 | b'unable to resolve identity of remote)\n' |
|
771 | 771 | ) |
|
772 | 772 | ) |
|
773 | 773 | elif sharenamemode == b'remote': |
|
774 | 774 | sharepath = os.path.join( |
|
775 | 775 | sharepool, node.hex(hashutil.sha1(source).digest()) |
|
776 | 776 | ) |
|
777 | 777 | else: |
|
778 | 778 | raise error.Abort( |
|
779 | 779 | _(b'unknown share naming mode: %s') % sharenamemode |
|
780 | 780 | ) |
|
781 | 781 | |
|
782 | 782 | # TODO this is a somewhat arbitrary restriction. |
|
783 | 783 | if narrow: |
|
784 | 784 | ui.status(_(b'(pooled storage not supported for narrow clones)\n')) |
|
785 | 785 | sharepath = None |
|
786 | 786 | |
|
787 | 787 | if sharepath: |
|
788 | 788 | return clonewithshare( |
|
789 | 789 | ui, |
|
790 | 790 | peeropts, |
|
791 | 791 | sharepath, |
|
792 | 792 | source, |
|
793 | 793 | srcpeer, |
|
794 | 794 | dest, |
|
795 | 795 | pull=pull, |
|
796 | 796 | rev=revs, |
|
797 | 797 | update=update, |
|
798 | 798 | stream=stream, |
|
799 | 799 | ) |
|
800 | 800 | |
|
801 | 801 | srclock = destlock = cleandir = None |
|
802 | 802 | srcrepo = srcpeer.local() |
|
803 | 803 | try: |
|
804 | 804 | abspath = origsource |
|
805 | 805 | if islocal(origsource): |
|
806 | 806 | abspath = os.path.abspath(util.urllocalpath(origsource)) |
|
807 | 807 | |
|
808 | 808 | if islocal(dest): |
|
809 | 809 | cleandir = dest |
|
810 | 810 | |
|
811 | 811 | copy = False |
|
812 | 812 | if ( |
|
813 | 813 | srcrepo |
|
814 | 814 | and srcrepo.cancopy() |
|
815 | 815 | and islocal(dest) |
|
816 | 816 | and not phases.hassecret(srcrepo) |
|
817 | 817 | ): |
|
818 | 818 | copy = not pull and not revs |
|
819 | 819 | |
|
820 | 820 | # TODO this is a somewhat arbitrary restriction. |
|
821 | 821 | if narrow: |
|
822 | 822 | copy = False |
|
823 | 823 | |
|
824 | 824 | if copy: |
|
825 | 825 | try: |
|
826 | 826 | # we use a lock here because if we race with commit, we |
|
827 | 827 | # can end up with extra data in the cloned revlogs that's |
|
828 | 828 | # not pointed to by changesets, thus causing verify to |
|
829 | 829 | # fail |
|
830 | 830 | srclock = srcrepo.lock(wait=False) |
|
831 | 831 | except error.LockError: |
|
832 | 832 | copy = False |
|
833 | 833 | |
|
834 | 834 | if copy: |
|
835 | 835 | srcrepo.hook(b'preoutgoing', throw=True, source=b'clone') |
|
836 | 836 | hgdir = os.path.realpath(os.path.join(dest, b".hg")) |
|
837 | 837 | if not os.path.exists(dest): |
|
838 | 838 | util.makedirs(dest) |
|
839 | 839 | else: |
|
840 | 840 | # only clean up directories we create ourselves |
|
841 | 841 | cleandir = hgdir |
|
842 | 842 | try: |
|
843 | 843 | destpath = hgdir |
|
844 | 844 | util.makedir(destpath, notindexed=True) |
|
845 | 845 | except OSError as inst: |
|
846 | 846 | if inst.errno == errno.EEXIST: |
|
847 | 847 | cleandir = None |
|
848 | 848 | raise error.Abort( |
|
849 | 849 | _(b"destination '%s' already exists") % dest |
|
850 | 850 | ) |
|
851 | 851 | raise |
|
852 | 852 | |
|
853 | 853 | destlock = copystore(ui, srcrepo, destpath) |
|
854 | 854 | # copy bookmarks over |
|
855 | 855 | srcbookmarks = srcrepo.vfs.join(b'bookmarks') |
|
856 | 856 | dstbookmarks = os.path.join(destpath, b'bookmarks') |
|
857 | 857 | if os.path.exists(srcbookmarks): |
|
858 | 858 | util.copyfile(srcbookmarks, dstbookmarks) |
|
859 | 859 | |
|
860 | 860 | dstcachedir = os.path.join(destpath, b'cache') |
|
861 | 861 | for cache in cacheutil.cachetocopy(srcrepo): |
|
862 | 862 | _copycache(srcrepo, dstcachedir, cache) |
|
863 | 863 | |
|
864 | 864 | # we need to re-init the repo after manually copying the data |
|
865 | 865 | # into it |
|
866 | 866 | destpeer = peer(srcrepo, peeropts, dest) |
|
867 | 867 | srcrepo.hook( |
|
868 | 868 | b'outgoing', source=b'clone', node=node.hex(node.nullid) |
|
869 | 869 | ) |
|
870 | 870 | else: |
|
871 | 871 | try: |
|
872 | 872 | # only pass ui when no srcrepo |
|
873 | 873 | destpeer = peer( |
|
874 | 874 | srcrepo or ui, |
|
875 | 875 | peeropts, |
|
876 | 876 | dest, |
|
877 | 877 | create=True, |
|
878 | 878 | createopts=createopts, |
|
879 | 879 | ) |
|
880 | 880 | except OSError as inst: |
|
881 | 881 | if inst.errno == errno.EEXIST: |
|
882 | 882 | cleandir = None |
|
883 | 883 | raise error.Abort( |
|
884 | 884 | _(b"destination '%s' already exists") % dest |
|
885 | 885 | ) |
|
886 | 886 | raise |
|
887 | 887 | |
|
888 | 888 | if revs: |
|
889 | 889 | if not srcpeer.capable(b'lookup'): |
|
890 | 890 | raise error.Abort( |
|
891 | 891 | _( |
|
892 | 892 | b"src repository does not support " |
|
893 | 893 | b"revision lookup and so doesn't " |
|
894 | 894 | b"support clone by revision" |
|
895 | 895 | ) |
|
896 | 896 | ) |
|
897 | 897 | |
|
898 | 898 | # TODO this is batchable. |
|
899 | 899 | remoterevs = [] |
|
900 | 900 | for rev in revs: |
|
901 | 901 | with srcpeer.commandexecutor() as e: |
|
902 | 902 | remoterevs.append( |
|
903 | 903 | e.callcommand(b'lookup', {b'key': rev,}).result() |
|
904 | 904 | ) |
|
905 | 905 | revs = remoterevs |
|
906 | 906 | |
|
907 | 907 | checkout = revs[0] |
|
908 | 908 | else: |
|
909 | 909 | revs = None |
|
910 | 910 | local = destpeer.local() |
|
911 | 911 | if local: |
|
912 | 912 | if narrow: |
|
913 | 913 | with local.wlock(), local.lock(): |
|
914 | 914 | local.setnarrowpats(storeincludepats, storeexcludepats) |
|
915 | 915 | narrowspec.copytoworkingcopy(local) |
|
916 | 916 | |
|
917 | 917 | u = util.url(abspath) |
|
918 | 918 | defaulturl = bytes(u) |
|
919 | 919 | local.ui.setconfig(b'paths', b'default', defaulturl, b'clone') |
|
920 | 920 | if not stream: |
|
921 | 921 | if pull: |
|
922 | 922 | stream = False |
|
923 | 923 | else: |
|
924 | 924 | stream = None |
|
925 | 925 | # internal config: ui.quietbookmarkmove |
|
926 | 926 | overrides = {(b'ui', b'quietbookmarkmove'): True} |
|
927 | 927 | with local.ui.configoverride(overrides, b'clone'): |
|
928 | 928 | exchange.pull( |
|
929 | 929 | local, |
|
930 | 930 | srcpeer, |
|
931 | 931 | revs, |
|
932 | 932 | streamclonerequested=stream, |
|
933 | 933 | includepats=storeincludepats, |
|
934 | 934 | excludepats=storeexcludepats, |
|
935 | 935 | depth=depth, |
|
936 | 936 | ) |
|
937 | 937 | elif srcrepo: |
|
938 | 938 | # TODO lift restriction once exchange.push() accepts narrow |
|
939 | 939 | # push. |
|
940 | 940 | if narrow: |
|
941 | 941 | raise error.Abort( |
|
942 | 942 | _( |
|
943 | 943 | b'narrow clone not available for ' |
|
944 | 944 | b'remote destinations' |
|
945 | 945 | ) |
|
946 | 946 | ) |
|
947 | 947 | |
|
948 | 948 | exchange.push( |
|
949 | 949 | srcrepo, |
|
950 | 950 | destpeer, |
|
951 | 951 | revs=revs, |
|
952 | 952 | bookmarks=srcrepo._bookmarks.keys(), |
|
953 | 953 | ) |
|
954 | 954 | else: |
|
955 | 955 | raise error.Abort( |
|
956 | 956 | _(b"clone from remote to remote not supported") |
|
957 | 957 | ) |
|
958 | 958 | |
|
959 | 959 | cleandir = None |
|
960 | 960 | |
|
961 | 961 | destrepo = destpeer.local() |
|
962 | 962 | if destrepo: |
|
963 | 963 | template = uimod.samplehgrcs[b'cloned'] |
|
964 | 964 | u = util.url(abspath) |
|
965 | 965 | u.passwd = None |
|
966 | 966 | defaulturl = bytes(u) |
|
967 | 967 | destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl)) |
|
968 | 968 | destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone') |
|
969 | 969 | |
|
970 | 970 | if ui.configbool(b'experimental', b'remotenames'): |
|
971 | 971 | logexchange.pullremotenames(destrepo, srcpeer) |
|
972 | 972 | |
|
973 | 973 | if update: |
|
974 | 974 | if update is not True: |
|
975 | 975 | with srcpeer.commandexecutor() as e: |
|
976 | 976 | checkout = e.callcommand( |
|
977 | 977 | b'lookup', {b'key': update,} |
|
978 | 978 | ).result() |
|
979 | 979 | |
|
980 | 980 | uprev = None |
|
981 | 981 | status = None |
|
982 | 982 | if checkout is not None: |
|
983 | 983 | # Some extensions (at least hg-git and hg-subversion) have |
|
984 | 984 | # a peer.lookup() implementation that returns a name instead |
|
985 | 985 | # of a nodeid. We work around it here until we've figured |
|
986 | 986 | # out a better solution. |
|
987 | 987 | if len(checkout) == 20 and checkout in destrepo: |
|
988 | 988 | uprev = checkout |
|
989 | 989 | elif scmutil.isrevsymbol(destrepo, checkout): |
|
990 | 990 | uprev = scmutil.revsymbol(destrepo, checkout).node() |
|
991 | 991 | else: |
|
992 | 992 | if update is not True: |
|
993 | 993 | try: |
|
994 | 994 | uprev = destrepo.lookup(update) |
|
995 | 995 | except error.RepoLookupError: |
|
996 | 996 | pass |
|
997 | 997 | if uprev is None: |
|
998 | 998 | try: |
|
999 | 999 | uprev = destrepo._bookmarks[b'@'] |
|
1000 | 1000 | update = b'@' |
|
1001 | 1001 | bn = destrepo[uprev].branch() |
|
1002 | 1002 | if bn == b'default': |
|
1003 | 1003 | status = _(b"updating to bookmark @\n") |
|
1004 | 1004 | else: |
|
1005 | 1005 | status = ( |
|
1006 | 1006 | _(b"updating to bookmark @ on branch %s\n") % bn |
|
1007 | 1007 | ) |
|
1008 | 1008 | except KeyError: |
|
1009 | 1009 | try: |
|
1010 | 1010 | uprev = destrepo.branchtip(b'default') |
|
1011 | 1011 | except error.RepoLookupError: |
|
1012 | 1012 | uprev = destrepo.lookup(b'tip') |
|
1013 | 1013 | if not status: |
|
1014 | 1014 | bn = destrepo[uprev].branch() |
|
1015 | 1015 | status = _(b"updating to branch %s\n") % bn |
|
1016 | 1016 | destrepo.ui.status(status) |
|
1017 | 1017 | _update(destrepo, uprev) |
|
1018 | 1018 | if update in destrepo._bookmarks: |
|
1019 | 1019 | bookmarks.activate(destrepo, update) |
|
1020 | 1020 | finally: |
|
1021 | 1021 | release(srclock, destlock) |
|
1022 | 1022 | if cleandir is not None: |
|
1023 | 1023 | shutil.rmtree(cleandir, True) |
|
1024 | 1024 | if srcpeer is not None: |
|
1025 | 1025 | srcpeer.close() |
|
1026 | 1026 | return srcpeer, destpeer |
|
1027 | 1027 | |
|
1028 | 1028 | |
|
1029 | 1029 | def _showstats(repo, stats, quietempty=False): |
|
1030 | 1030 | if quietempty and stats.isempty(): |
|
1031 | 1031 | return |
|
1032 | 1032 | repo.ui.status( |
|
1033 | 1033 | _( |
|
1034 | 1034 | b"%d files updated, %d files merged, " |
|
1035 | 1035 | b"%d files removed, %d files unresolved\n" |
|
1036 | 1036 | ) |
|
1037 | 1037 | % ( |
|
1038 | 1038 | stats.updatedcount, |
|
1039 | 1039 | stats.mergedcount, |
|
1040 | 1040 | stats.removedcount, |
|
1041 | 1041 | stats.unresolvedcount, |
|
1042 | 1042 | ) |
|
1043 | 1043 | ) |
|
1044 | 1044 | |
|
1045 | 1045 | |
|
1046 | 1046 | def updaterepo(repo, node, overwrite, updatecheck=None): |
|
1047 | 1047 | """Update the working directory to node. |
|
1048 | 1048 | |
|
1049 | 1049 | When overwrite is set, changes are clobbered; otherwise they are merged
|
1050 | 1050 | |
|
1051 | 1051 | returns stats (see pydoc mercurial.merge.applyupdates)""" |
|
1052 | 1052 | return mergemod._update( |
|
1053 | 1053 | repo, |
|
1054 | 1054 | node, |
|
1055 | 1055 | branchmerge=False, |
|
1056 | 1056 | force=overwrite, |
|
1057 | 1057 | labels=[b'working copy', b'destination'], |
|
1058 | 1058 | updatecheck=updatecheck, |
|
1059 | 1059 | ) |
|
1060 | 1060 | |
|
1061 | 1061 | |
|
1062 | 1062 | def update(repo, node, quietempty=False, updatecheck=None): |
|
1063 | 1063 | """update the working directory to node""" |
|
1064 |      | stats = update

     | 1064 | stats = mergemod.update(repo[node], updatecheck=updatecheck)
|
1065 | 1065 | _showstats(repo, stats, quietempty) |
|
1066 | 1066 | if stats.unresolvedcount: |
|
1067 | 1067 | repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n")) |
|
1068 | 1068 | return stats.unresolvedcount > 0 |
|
1069 | 1069 | |
|
1070 | 1070 | |
|
1071 | 1071 | # naming conflict in clone() |
|
1072 | 1072 | _update = update |
|
1073 | 1073 | |
|
1074 | 1074 | |
|
1075 | 1075 | def clean(repo, node, show_stats=True, quietempty=False): |
|
1076 | 1076 | """forcibly switch the working directory to node, clobbering changes""" |
|
1077 | 1077 | stats = mergemod.clean_update(repo[node]) |
|
1078 | 1078 | assert stats.unresolvedcount == 0 |
|
1079 | 1079 | if show_stats: |
|
1080 | 1080 | _showstats(repo, stats, quietempty) |
|
1081 | 1081 | |
|
1082 | 1082 | |
|
1083 | 1083 | # naming conflict in updatetotally() |
|
1084 | 1084 | _clean = clean |
|
1085 | 1085 | |
|
1086 | 1086 | _VALID_UPDATECHECKS = { |
|
1087 | 1087 | mergemod.UPDATECHECK_ABORT, |
|
1088 | 1088 | mergemod.UPDATECHECK_NONE, |
|
1089 | 1089 | mergemod.UPDATECHECK_LINEAR, |
|
1090 | 1090 | mergemod.UPDATECHECK_NO_CONFLICT, |
|
1091 | 1091 | } |
|
1092 | 1092 | |
|
1093 | 1093 | |
|
1094 | 1094 | def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None): |
|
1095 | 1095 | """Update the working directory with extra care for non-file components |
|
1096 | 1096 | |
|
1097 | 1097 | This takes care of non-file components below: |
|
1098 | 1098 | |
|
1099 | 1099 | :bookmark: might be advanced or (in)activated |
|
1100 | 1100 | |
|
1101 | 1101 | This takes arguments below: |
|
1102 | 1102 | |
|
1103 | 1103 | :checkout: to which revision the working directory is updated |
|
1104 | 1104 | :brev: a name, which might be a bookmark to be activated after updating |
|
1105 | 1105 | :clean: whether changes in the working directory can be discarded |
|
1106 | 1106 | :updatecheck: how to deal with a dirty working directory |
|
1107 | 1107 | |
|
1108 | 1108 | Valid values for updatecheck are the UPDATECHECK_* constants |
|
1109 | 1109 | defined in the merge module. Passing `None` will result in using the |
|
1110 | 1110 | configured default. |
|
1111 | 1111 | |
|
1112 | 1112 | * ABORT: abort if the working directory is dirty |
|
1113 | 1113 | * NONE: don't check (merge working directory changes into destination) |
|
1114 | 1114 | * LINEAR: check that update is linear before merging working directory |
|
1115 | 1115 | changes into destination |
|
1116 | 1116 | * NO_CONFLICT: check that the update does not result in file merges |
|
1117 | 1117 | |
|
1118 | 1118 | This returns whether a conflict was detected while updating.
|
1119 | 1119 | """ |
|
1120 | 1120 | if updatecheck is None: |
|
1121 | 1121 | updatecheck = ui.config(b'commands', b'update.check') |
|
1122 | 1122 | if updatecheck not in _VALID_UPDATECHECKS: |
|
1123 | 1123 | # If not configured, or invalid value configured |
|
1124 | 1124 | updatecheck = mergemod.UPDATECHECK_LINEAR |
|
1125 | 1125 | if updatecheck not in _VALID_UPDATECHECKS: |
|
1126 | 1126 | raise ValueError( |
|
1127 | 1127 | r'Invalid updatecheck value %r (can accept %r)' |
|
1128 | 1128 | % (updatecheck, _VALID_UPDATECHECKS) |
|
1129 | 1129 | ) |
|
1130 | 1130 | with repo.wlock(): |
|
1131 | 1131 | movemarkfrom = None |
|
1132 | 1132 | warndest = False |
|
1133 | 1133 | if checkout is None: |
|
1134 | 1134 | updata = destutil.destupdate(repo, clean=clean) |
|
1135 | 1135 | checkout, movemarkfrom, brev = updata |
|
1136 | 1136 | warndest = True |
|
1137 | 1137 | |
|
1138 | 1138 | if clean: |
|
1139 | 1139 | ret = _clean(repo, checkout) |
|
1140 | 1140 | else: |
|
1141 | 1141 | if updatecheck == mergemod.UPDATECHECK_ABORT: |
|
1142 | 1142 | cmdutil.bailifchanged(repo, merge=False) |
|
1143 | 1143 | updatecheck = mergemod.UPDATECHECK_NONE |
|
1144 | 1144 | ret = _update(repo, checkout, updatecheck=updatecheck) |
|
1145 | 1145 | |
|
1146 | 1146 | if not ret and movemarkfrom: |
|
1147 | 1147 | if movemarkfrom == repo[b'.'].node(): |
|
1148 | 1148 | pass # no-op update |
|
1149 | 1149 | elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()): |
|
1150 | 1150 | b = ui.label(repo._activebookmark, b'bookmarks.active') |
|
1151 | 1151 | ui.status(_(b"updating bookmark %s\n") % b) |
|
1152 | 1152 | else: |
|
1153 | 1153 | # this can happen with a non-linear update |
|
1154 | 1154 | b = ui.label(repo._activebookmark, b'bookmarks') |
|
1155 | 1155 | ui.status(_(b"(leaving bookmark %s)\n") % b) |
|
1156 | 1156 | bookmarks.deactivate(repo) |
|
1157 | 1157 | elif brev in repo._bookmarks: |
|
1158 | 1158 | if brev != repo._activebookmark: |
|
1159 | 1159 | b = ui.label(brev, b'bookmarks.active') |
|
1160 | 1160 | ui.status(_(b"(activating bookmark %s)\n") % b) |
|
1161 | 1161 | bookmarks.activate(repo, brev) |
|
1162 | 1162 | elif brev: |
|
1163 | 1163 | if repo._activebookmark: |
|
1164 | 1164 | b = ui.label(repo._activebookmark, b'bookmarks') |
|
1165 | 1165 | ui.status(_(b"(leaving bookmark %s)\n") % b) |
|
1166 | 1166 | bookmarks.deactivate(repo) |
|
1167 | 1167 | |
|
1168 | 1168 | if warndest: |
|
1169 | 1169 | destutil.statusotherdests(ui, repo) |
|
1170 | 1170 | |
|
1171 | 1171 | return ret |
|
1172 | 1172 | |
|
1173 | 1173 | |
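Editor's note: ``updatetotally`` validates ``updatecheck`` twice with different policies: a bad *configured* value silently degrades to LINEAR, while a bad value passed *by a caller* is a programming error and raises. The shape of that policy in isolation (constant names are stand-ins for the UPDATECHECK_* constants)::

    VALID = {'abort', 'none', 'linear', 'noconflict'}

    def resolve_updatecheck(explicit, configured):
        if explicit is None:
            value = configured
            if value not in VALID:    # misconfiguration: degrade gracefully
                value = 'linear'
            return value
        if explicit not in VALID:     # caller bug: fail loudly
            raise ValueError('invalid updatecheck value %r' % explicit)
        return explicit

    print(resolve_updatecheck(None, 'bogus'))   # linear
    print(resolve_updatecheck('abort', None))   # abort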
|
1174 | 1174 | def merge( |
|
1175 | 1175 | ctx, force=False, remind=True, labels=None, |
|
1176 | 1176 | ): |
|
1177 | 1177 | """Branch merge with node, resolving changes. Return true if any |
|
1178 | 1178 | unresolved conflicts.""" |
|
1179 | 1179 | repo = ctx.repo() |
|
1180 | 1180 | stats = mergemod.merge(ctx, force=force, labels=labels) |
|
1181 | 1181 | _showstats(repo, stats) |
|
1182 | 1182 | if stats.unresolvedcount: |
|
1183 | 1183 | repo.ui.status( |
|
1184 | 1184 | _( |
|
1185 | 1185 | b"use 'hg resolve' to retry unresolved file merges " |
|
1186 | 1186 | b"or 'hg merge --abort' to abandon\n" |
|
1187 | 1187 | ) |
|
1188 | 1188 | ) |
|
1189 | 1189 | elif remind: |
|
1190 | 1190 | repo.ui.status(_(b"(branch merge, don't forget to commit)\n")) |
|
1191 | 1191 | return stats.unresolvedcount > 0 |
|
1192 | 1192 | |
|
1193 | 1193 | |
|
1194 | 1194 | def abortmerge(ui, repo): |
|
1195 | 1195 | ms = mergestatemod.mergestate.read(repo) |
|
1196 | 1196 | if ms.active(): |
|
1197 | 1197 | # there were conflicts |
|
1198 | 1198 | node = ms.localctx.hex() |
|
1199 | 1199 | else: |
|
1200 | 1200 | # there were no conflicts, mergestate was not stored
|
1201 | 1201 | node = repo[b'.'].hex() |
|
1202 | 1202 | |
|
1203 | 1203 | repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12]) |
|
1204 | 1204 | stats = mergemod.clean_update(repo[node]) |
|
1205 | 1205 | assert stats.unresolvedcount == 0 |
|
1206 | 1206 | _showstats(repo, stats) |
|
1207 | 1207 | |
|
1208 | 1208 | |
|
1209 | 1209 | def _incoming( |
|
1210 | 1210 | displaychlist, subreporecurse, ui, repo, source, opts, buffered=False |
|
1211 | 1211 | ): |
|
1212 | 1212 | """ |
|
1213 | 1213 | Helper for incoming / gincoming. |
|
1214 | 1214 | displaychlist gets called with |
|
1215 | 1215 | (remoterepo, incomingchangesetlist, displayer) parameters, |
|
1216 | 1216 | and is supposed to contain only code that can't be unified. |
|
1217 | 1217 | """ |
|
1218 | 1218 | source, branches = parseurl(ui.expandpath(source), opts.get(b'branch')) |
|
1219 | 1219 | other = peer(repo, opts, source) |
|
1220 | 1220 | ui.status(_(b'comparing with %s\n') % util.hidepassword(source)) |
|
1221 | 1221 | revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev')) |
|
1222 | 1222 | |
|
1223 | 1223 | if revs: |
|
1224 | 1224 | revs = [other.lookup(rev) for rev in revs] |
|
1225 | 1225 | other, chlist, cleanupfn = bundlerepo.getremotechanges( |
|
1226 | 1226 | ui, repo, other, revs, opts[b"bundle"], opts[b"force"] |
|
1227 | 1227 | ) |
|
1228 | 1228 | try: |
|
1229 | 1229 | if not chlist: |
|
1230 | 1230 | ui.status(_(b"no changes found\n")) |
|
1231 | 1231 | return subreporecurse() |
|
1232 | 1232 | ui.pager(b'incoming') |
|
1233 | 1233 | displayer = logcmdutil.changesetdisplayer( |
|
1234 | 1234 | ui, other, opts, buffered=buffered |
|
1235 | 1235 | ) |
|
1236 | 1236 | displaychlist(other, chlist, displayer) |
|
1237 | 1237 | displayer.close() |
|
1238 | 1238 | finally: |
|
1239 | 1239 | cleanupfn() |
|
1240 | 1240 | subreporecurse() |
|
1241 | 1241 | return 0 # exit code is zero since we found incoming changes |
|
1242 | 1242 | |
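Editor's note: ``_incoming`` factors the shared plumbing (peer setup, bundle fetch, pager, cleanup) away from the per-frontend rendering by taking the renderer as a callback, as the docstring says. The inversion in miniature (all names invented)::

    def _incoming(displaychlist, fetch_changes):
        chlist, cleanup = fetch_changes()   # shared setup
        try:
            if not chlist:
                print('no changes found')
                return 1
            displaychlist(chlist)           # only this part varies per frontend
            return 0
        finally:
            cleanup()                       # shared teardown runs either way

    def textdisplay(chlist):
        for node in chlist:
            print('changeset', node)

    print(_incoming(textdisplay, lambda: (['ab12', 'cd34'], lambda: None)))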
|
1243 | 1243 | |
|
1244 | 1244 | def incoming(ui, repo, source, opts): |
|
1245 | 1245 | def subreporecurse(): |
|
1246 | 1246 | ret = 1 |
|
1247 | 1247 | if opts.get(b'subrepos'): |
|
1248 | 1248 | ctx = repo[None] |
|
1249 | 1249 | for subpath in sorted(ctx.substate): |
|
1250 | 1250 | sub = ctx.sub(subpath) |
|
1251 | 1251 | ret = min(ret, sub.incoming(ui, source, opts)) |
|
1252 | 1252 | return ret |
|
1253 | 1253 | |
|
1254 | 1254 | def display(other, chlist, displayer): |
|
1255 | 1255 | limit = logcmdutil.getlimit(opts) |
|
1256 | 1256 | if opts.get(b'newest_first'): |
|
1257 | 1257 | chlist.reverse() |
|
1258 | 1258 | count = 0 |
|
1259 | 1259 | for n in chlist: |
|
1260 | 1260 | if limit is not None and count >= limit: |
|
1261 | 1261 | break |
|
1262 | 1262 | parents = [p for p in other.changelog.parents(n) if p != nullid] |
|
1263 | 1263 | if opts.get(b'no_merges') and len(parents) == 2: |
|
1264 | 1264 | continue |
|
1265 | 1265 | count += 1 |
|
1266 | 1266 | displayer.show(other[n]) |
|
1267 | 1267 | |
|
1268 | 1268 | return _incoming(display, subreporecurse, ui, repo, source, opts) |
|
1269 | 1269 | |
|
1270 | 1270 | |
|
1271 | 1271 | def _outgoing(ui, repo, dest, opts): |
|
1272 | 1272 | path = ui.paths.getpath(dest, default=(b'default-push', b'default')) |
|
1273 | 1273 | if not path: |
|
1274 | 1274 | raise error.Abort( |
|
1275 | 1275 | _(b'default repository not configured!'), |
|
1276 | 1276 | hint=_(b"see 'hg help config.paths'"), |
|
1277 | 1277 | ) |
|
1278 | 1278 | dest = path.pushloc or path.loc |
|
1279 | 1279 | branches = path.branch, opts.get(b'branch') or [] |
|
1280 | 1280 | |
|
1281 | 1281 | ui.status(_(b'comparing with %s\n') % util.hidepassword(dest)) |
|
1282 | 1282 | revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev')) |
|
1283 | 1283 | if revs: |
|
1284 | 1284 | revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)] |
|
1285 | 1285 | |
|
1286 | 1286 | other = peer(repo, opts, dest) |
|
1287 | 1287 | outgoing = discovery.findcommonoutgoing( |
|
1288 | 1288 | repo, other, revs, force=opts.get(b'force') |
|
1289 | 1289 | ) |
|
1290 | 1290 | o = outgoing.missing |
|
1291 | 1291 | if not o: |
|
1292 | 1292 | scmutil.nochangesfound(repo.ui, repo, outgoing.excluded) |
|
1293 | 1293 | return o, other |
|
1294 | 1294 | |
|
1295 | 1295 | |
|
1296 | 1296 | def outgoing(ui, repo, dest, opts): |
|
1297 | 1297 | def recurse(): |
|
1298 | 1298 | ret = 1 |
|
1299 | 1299 | if opts.get(b'subrepos'): |
|
1300 | 1300 | ctx = repo[None] |
|
1301 | 1301 | for subpath in sorted(ctx.substate): |
|
1302 | 1302 | sub = ctx.sub(subpath) |
|
1303 | 1303 | ret = min(ret, sub.outgoing(ui, dest, opts)) |
|
1304 | 1304 | return ret |
|
1305 | 1305 | |
|
1306 | 1306 | limit = logcmdutil.getlimit(opts) |
|
1307 | 1307 | o, other = _outgoing(ui, repo, dest, opts) |
|
1308 | 1308 | if not o: |
|
1309 | 1309 | cmdutil.outgoinghooks(ui, repo, other, opts, o) |
|
1310 | 1310 | return recurse() |
|
1311 | 1311 | |
|
1312 | 1312 | if opts.get(b'newest_first'): |
|
1313 | 1313 | o.reverse() |
|
1314 | 1314 | ui.pager(b'outgoing') |
|
1315 | 1315 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
1316 | 1316 | count = 0 |
|
1317 | 1317 | for n in o: |
|
1318 | 1318 | if limit is not None and count >= limit: |
|
1319 | 1319 | break |
|
1320 | 1320 | parents = [p for p in repo.changelog.parents(n) if p != nullid] |
|
1321 | 1321 | if opts.get(b'no_merges') and len(parents) == 2: |
|
1322 | 1322 | continue |
|
1323 | 1323 | count += 1 |
|
1324 | 1324 | displayer.show(repo[n]) |
|
1325 | 1325 | displayer.close() |
|
1326 | 1326 | cmdutil.outgoinghooks(ui, repo, other, opts, o) |
|
1327 | 1327 | recurse() |
|
1328 | 1328 | return 0 # exit code is zero since we found outgoing changes |
|
1329 | 1329 | |
|
1330 | 1330 | |
|
1331 | 1331 | def verify(repo, level=None): |
|
1332 | 1332 | """verify the consistency of a repository""" |
|
1333 | 1333 | ret = verifymod.verify(repo, level=level) |
|
1334 | 1334 | |
|
1335 | 1335 | # Broken subrepo references in hidden csets don't seem worth worrying about, |
|
1336 | 1336 | # since they can't be pushed/pulled, and --hidden can be used if they are a |
|
1337 | 1337 | # concern. |
|
1338 | 1338 | |
|
1339 | 1339 | # pathto() is needed for -R case |
|
1340 | 1340 | revs = repo.revs( |
|
1341 | 1341 | b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate') |
|
1342 | 1342 | ) |
|
1343 | 1343 | |
|
1344 | 1344 | if revs: |
|
1345 | 1345 | repo.ui.status(_(b'checking subrepo links\n')) |
|
1346 | 1346 | for rev in revs: |
|
1347 | 1347 | ctx = repo[rev] |
|
1348 | 1348 | try: |
|
1349 | 1349 | for subpath in ctx.substate: |
|
1350 | 1350 | try: |
|
1351 | 1351 | ret = ( |
|
1352 | 1352 | ctx.sub(subpath, allowcreate=False).verify() or ret |
|
1353 | 1353 | ) |
|
1354 | 1354 | except error.RepoError as e: |
|
1355 | 1355 | repo.ui.warn(b'%d: %s\n' % (rev, e)) |
|
1356 | 1356 | except Exception: |
|
1357 | 1357 | repo.ui.warn( |
|
1358 | 1358 | _(b'.hgsubstate is corrupt in revision %s\n') |
|
1359 | 1359 | % node.short(ctx.node()) |
|
1360 | 1360 | ) |
|
1361 | 1361 | |
|
1362 | 1362 | return ret |
|
1363 | 1363 | |
|
1364 | 1364 | |
|
1365 | 1365 | def remoteui(src, opts): |
|
1366 | 1366 | """build a remote ui from ui or repo and opts""" |
|
1367 | 1367 | if util.safehasattr(src, b'baseui'): # looks like a repository |
|
1368 | 1368 | dst = src.baseui.copy() # drop repo-specific config |
|
1369 | 1369 | src = src.ui # copy target options from repo |
|
1370 | 1370 | else: # assume it's a global ui object |
|
1371 | 1371 | dst = src.copy() # keep all global options |
|
1372 | 1372 | |
|
1373 | 1373 | # copy ssh-specific options |
|
1374 | 1374 | for o in b'ssh', b'remotecmd': |
|
1375 | 1375 | v = opts.get(o) or src.config(b'ui', o) |
|
1376 | 1376 | if v: |
|
1377 | 1377 | dst.setconfig(b"ui", o, v, b'copied') |
|
1378 | 1378 | |
|
1379 | 1379 | # copy bundle-specific options |
|
1380 | 1380 | r = src.config(b'bundle', b'mainreporoot') |
|
1381 | 1381 | if r: |
|
1382 | 1382 | dst.setconfig(b'bundle', b'mainreporoot', r, b'copied') |
|
1383 | 1383 | |
|
1384 | 1384 | # copy selected local settings to the remote ui |
|
1385 | 1385 | for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'): |
|
1386 | 1386 | for key, val in src.configitems(sect): |
|
1387 | 1387 | dst.setconfig(sect, key, val, b'copied') |
|
1388 | 1388 | v = src.config(b'web', b'cacerts') |
|
1389 | 1389 | if v: |
|
1390 | 1390 | dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied') |
|
1391 | 1391 | |
|
1392 | 1392 | return dst |
|
1393 | 1393 | |
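A hedged sketch of how ``remoteui()`` behaves from a caller's point of view; the repository and its configuration are assumed, and the assertion only illustrates the copy semantics::

    # Starting from a repository, remoteui() builds a ui that keeps
    # global and ssh/auth/proxy settings but drops repo-local config.
    remote = remoteui(repo, {})
    # ui.ssh came either from opts (empty here) or from the source:
    assert remote.config(b'ui', b'ssh') == repo.ui.config(b'ui', b'ssh')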
|
1394 | 1394 | |
|
1395 | 1395 | # Files of interest |
|
1396 | 1396 | # Used to check if the repository has changed, by looking at the mtime and size of
|
1397 | 1397 | # these files. |
|
1398 | 1398 | foi = [ |
|
1399 | 1399 | (b'spath', b'00changelog.i'), |
|
1400 | 1400 | (b'spath', b'phaseroots'), # ! phase can change content at the same size |
|
1401 | 1401 | (b'spath', b'obsstore'), |
|
1402 | 1402 | (b'path', b'bookmarks'), # ! bookmark can change content at the same size |
|
1403 | 1403 | ] |
|
1404 | 1404 | |
|
1405 | 1405 | |
|
1406 | 1406 | class cachedlocalrepo(object): |
|
1407 | 1407 | """Holds a localrepository that can be cached and reused.""" |
|
1408 | 1408 | |
|
1409 | 1409 | def __init__(self, repo): |
|
1410 | 1410 | """Create a new cached repo from an existing repo. |
|
1411 | 1411 | |
|
1412 | 1412 | We assume the passed in repo was recently created. If the |
|
1413 | 1413 | repo has changed between when it was created and when it was |
|
1414 | 1414 | turned into a cache, it may not refresh properly. |
|
1415 | 1415 | """ |
|
1416 | 1416 | assert isinstance(repo, localrepo.localrepository) |
|
1417 | 1417 | self._repo = repo |
|
1418 | 1418 | self._state, self.mtime = self._repostate() |
|
1419 | 1419 | self._filtername = repo.filtername |
|
1420 | 1420 | |
|
1421 | 1421 | def fetch(self): |
|
1422 | 1422 | """Refresh (if necessary) and return a repository. |
|
1423 | 1423 | |
|
1424 | 1424 | If the cached instance is out of date, it will be recreated |
|
1425 | 1425 | automatically and returned. |
|
1426 | 1426 | |
|
1427 | 1427 | Returns a tuple of the repo and a boolean indicating whether a new |
|
1428 | 1428 | repo instance was created. |
|
1429 | 1429 | """ |
|
1430 | 1430 | # We compare the mtimes and sizes of some well-known files to |
|
1431 | 1431 | # determine if the repo changed. This is not precise, as mtimes |
|
1432 | 1432 | # are susceptible to clock skew and imprecise filesystems, and
|
1433 | 1433 | # file content can change while maintaining the same size. |
|
1434 | 1434 | |
|
1435 | 1435 | state, mtime = self._repostate() |
|
1436 | 1436 | if state == self._state: |
|
1437 | 1437 | return self._repo, False |
|
1438 | 1438 | |
|
1439 | 1439 | repo = repository(self._repo.baseui, self._repo.url()) |
|
1440 | 1440 | if self._filtername: |
|
1441 | 1441 | self._repo = repo.filtered(self._filtername) |
|
1442 | 1442 | else: |
|
1443 | 1443 | self._repo = repo.unfiltered() |
|
1444 | 1444 | self._state = state |
|
1445 | 1445 | self.mtime = mtime |
|
1446 | 1446 | |
|
1447 | 1447 | return self._repo, True |
|
1448 | 1448 | |
|
1449 | 1449 | def _repostate(self): |
|
1450 | 1450 | state = [] |
|
1451 | 1451 | maxmtime = -1 |
|
1452 | 1452 | for attr, fname in foi: |
|
1453 | 1453 | prefix = getattr(self._repo, attr) |
|
1454 | 1454 | p = os.path.join(prefix, fname) |
|
1455 | 1455 | try: |
|
1456 | 1456 | st = os.stat(p) |
|
1457 | 1457 | except OSError: |
|
1458 | 1458 | st = os.stat(prefix) |
|
1459 | 1459 | state.append((st[stat.ST_MTIME], st.st_size)) |
|
1460 | 1460 | maxmtime = max(maxmtime, st[stat.ST_MTIME]) |
|
1461 | 1461 | |
|
1462 | 1462 | return tuple(state), maxmtime |
|
1463 | 1463 | |
|
1464 | 1464 | def copy(self): |
|
1465 | 1465 | """Obtain a copy of this class instance. |
|
1466 | 1466 | |
|
1467 | 1467 | A new localrepository instance is obtained. The new instance should be |
|
1468 | 1468 | completely independent of the original. |
|
1469 | 1469 | """ |
|
1470 | 1470 | repo = repository(self._repo.baseui, self._repo.origroot) |
|
1471 | 1471 | if self._filtername: |
|
1472 | 1472 | repo = repo.filtered(self._filtername) |
|
1473 | 1473 | else: |
|
1474 | 1474 | repo = repo.unfiltered() |
|
1475 | 1475 | c = cachedlocalrepo(repo) |
|
1476 | 1476 | c._state = self._state |
|
1477 | 1477 | c.mtime = self.mtime |
|
1478 | 1478 | return c |
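To make the intended life cycle of ``cachedlocalrepo`` concrete, here is a hedged usage sketch; the serving loop and helper names are invented for illustration::

    cache = cachedlocalrepo(repo)        # repo assumed freshly created
    while serving:                       # hypothetical long-lived loop
        repo, fresh = cache.fetch()      # re-reads only if a foi changed
        if fresh:
            # A brand-new localrepository instance replaced the stale
            # one; drop caller-side state tied to the old instance.
            invalidate_derived_caches()  # hypothetical helper
        handle_request(repo)             # hypothetical helper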
@@ -1,1174 +1,1174 b'' | |||
|
1 | 1 | # shelve.py - save/restore working directory state |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """save and restore changes to the working directory |
|
9 | 9 | |
|
10 | 10 | The "hg shelve" command saves changes made to the working directory |
|
11 | 11 | and reverts those changes, resetting the working directory to a clean |
|
12 | 12 | state. |
|
13 | 13 | |
|
14 | 14 | Later on, the "hg unshelve" command restores the changes saved by "hg |
|
15 | 15 | shelve". Changes can be restored even after updating to a different |
|
16 | 16 | parent, in which case Mercurial's merge machinery will resolve any |
|
17 | 17 | conflicts if necessary. |
|
18 | 18 | |
|
19 | 19 | You can have more than one shelved change outstanding at a time; each |
|
20 | 20 | shelved change has a distinct name. For details, see the help for "hg |
|
21 | 21 | shelve". |
|
22 | 22 | """ |
|
23 | 23 | from __future__ import absolute_import |
|
24 | 24 | |
|
25 | 25 | import collections |
|
26 | 26 | import errno |
|
27 | 27 | import itertools |
|
28 | 28 | import stat |
|
29 | 29 | |
|
30 | 30 | from .i18n import _ |
|
31 | 31 | from .pycompat import open |
|
32 | 32 | from . import ( |
|
33 | 33 | bookmarks, |
|
34 | 34 | bundle2, |
|
35 | 35 | bundlerepo, |
|
36 | 36 | changegroup, |
|
37 | 37 | cmdutil, |
|
38 | 38 | discovery, |
|
39 | 39 | error, |
|
40 | 40 | exchange, |
|
41 | 41 | hg, |
|
42 | 42 | lock as lockmod, |
|
43 | 43 | mdiff, |
|
44 | 44 | merge, |
|
45 | 45 | mergestate as mergestatemod, |
|
46 | 46 | node as nodemod, |
|
47 | 47 | patch, |
|
48 | 48 | phases, |
|
49 | 49 | pycompat, |
|
50 | 50 | repair, |
|
51 | 51 | scmutil, |
|
52 | 52 | templatefilters, |
|
53 | 53 | util, |
|
54 | 54 | vfs as vfsmod, |
|
55 | 55 | ) |
|
56 | 56 | from .utils import ( |
|
57 | 57 | dateutil, |
|
58 | 58 | stringutil, |
|
59 | 59 | ) |
|
60 | 60 | |
|
61 | 61 | backupdir = b'shelve-backup' |
|
62 | 62 | shelvedir = b'shelved' |
|
63 | 63 | shelvefileextensions = [b'hg', b'patch', b'shelve'] |
|
64 | 64 | # universal extension is present in all types of shelves |
|
65 | 65 | patchextension = b'patch' |
|
66 | 66 | |
|
67 | 67 | # we never need the user, so we use a |
|
68 | 68 | # generic user for all shelve operations |
|
69 | 69 | shelveuser = b'shelve@localhost' |
|
70 | 70 | |
|
71 | 71 | |
|
72 | 72 | class shelvedfile(object): |
|
73 | 73 | """Helper for the file storing a single shelve |
|
74 | 74 | |
|
75 | 75 | Handles common functions on shelve files (.hg/.patch) using |
|
76 | 76 | the vfs layer""" |
|
77 | 77 | |
|
78 | 78 | def __init__(self, repo, name, filetype=None): |
|
79 | 79 | self.repo = repo |
|
80 | 80 | self.name = name |
|
81 | 81 | self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir)) |
|
82 | 82 | self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir)) |
|
83 | 83 | self.ui = self.repo.ui |
|
84 | 84 | if filetype: |
|
85 | 85 | self.fname = name + b'.' + filetype |
|
86 | 86 | else: |
|
87 | 87 | self.fname = name |
|
88 | 88 | |
|
89 | 89 | def exists(self): |
|
90 | 90 | return self.vfs.exists(self.fname) |
|
91 | 91 | |
|
92 | 92 | def filename(self): |
|
93 | 93 | return self.vfs.join(self.fname) |
|
94 | 94 | |
|
95 | 95 | def backupfilename(self): |
|
96 | 96 | def gennames(base): |
|
97 | 97 | yield base |
|
98 | 98 | base, ext = base.rsplit(b'.', 1) |
|
99 | 99 | for i in itertools.count(1): |
|
100 | 100 | yield b'%s-%d.%s' % (base, i, ext) |
|
101 | 101 | |
|
102 | 102 | name = self.backupvfs.join(self.fname) |
|
103 | 103 | for n in gennames(name): |
|
104 | 104 | if not self.backupvfs.exists(n): |
|
105 | 105 | return n |
|
106 | 106 | |
|
107 | 107 | def movetobackup(self): |
|
108 | 108 | if not self.backupvfs.isdir(): |
|
109 | 109 | self.backupvfs.makedir() |
|
110 | 110 | util.rename(self.filename(), self.backupfilename()) |
|
111 | 111 | |
|
112 | 112 | def stat(self): |
|
113 | 113 | return self.vfs.stat(self.fname) |
|
114 | 114 | |
|
115 | 115 | def opener(self, mode=b'rb'): |
|
116 | 116 | try: |
|
117 | 117 | return self.vfs(self.fname, mode) |
|
118 | 118 | except IOError as err: |
|
119 | 119 | if err.errno != errno.ENOENT: |
|
120 | 120 | raise |
|
121 | 121 | raise error.Abort(_(b"shelved change '%s' not found") % self.name) |
|
122 | 122 | |
|
123 | 123 | def applybundle(self, tr): |
|
124 | 124 | fp = self.opener() |
|
125 | 125 | try: |
|
126 | 126 | targetphase = phases.internal |
|
127 | 127 | if not phases.supportinternal(self.repo): |
|
128 | 128 | targetphase = phases.secret |
|
129 | 129 | gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs) |
|
130 | 130 | pretip = self.repo[b'tip'] |
|
131 | 131 | bundle2.applybundle( |
|
132 | 132 | self.repo, |
|
133 | 133 | gen, |
|
134 | 134 | tr, |
|
135 | 135 | source=b'unshelve', |
|
136 | 136 | url=b'bundle:' + self.vfs.join(self.fname), |
|
137 | 137 | targetphase=targetphase, |
|
138 | 138 | ) |
|
139 | 139 | shelvectx = self.repo[b'tip'] |
|
140 | 140 | if pretip == shelvectx: |
|
141 | 141 | shelverev = tr.changes[b'revduplicates'][-1] |
|
142 | 142 | shelvectx = self.repo[shelverev] |
|
143 | 143 | return shelvectx |
|
144 | 144 | finally: |
|
145 | 145 | fp.close() |
|
146 | 146 | |
|
147 | 147 | def bundlerepo(self): |
|
148 | 148 | path = self.vfs.join(self.fname) |
|
149 | 149 | return bundlerepo.instance( |
|
150 | 150 | self.repo.baseui, b'bundle://%s+%s' % (self.repo.root, path), False |
|
151 | 151 | ) |
|
152 | 152 | |
|
153 | 153 | def writebundle(self, bases, node): |
|
154 | 154 | cgversion = changegroup.safeversion(self.repo) |
|
155 | 155 | if cgversion == b'01': |
|
156 | 156 | btype = b'HG10BZ' |
|
157 | 157 | compression = None |
|
158 | 158 | else: |
|
159 | 159 | btype = b'HG20' |
|
160 | 160 | compression = b'BZ' |
|
161 | 161 | |
|
162 | 162 | repo = self.repo.unfiltered() |
|
163 | 163 | |
|
164 | 164 | outgoing = discovery.outgoing( |
|
165 | 165 | repo, missingroots=bases, ancestorsof=[node] |
|
166 | 166 | ) |
|
167 | 167 | cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve') |
|
168 | 168 | |
|
169 | 169 | bundle2.writebundle( |
|
170 | 170 | self.ui, cg, self.fname, btype, self.vfs, compression=compression |
|
171 | 171 | ) |
|
172 | 172 | |
|
173 | 173 | def writeinfo(self, info): |
|
174 | 174 | scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info) |
|
175 | 175 | |
|
176 | 176 | def readinfo(self): |
|
177 | 177 | return scmutil.simplekeyvaluefile(self.vfs, self.fname).read() |
|
178 | 178 | |
|
179 | 179 | |
|
180 | 180 | class shelvedstate(object): |
|
181 | 181 | """Handle persistence during unshelving operations. |
|
182 | 182 | |
|
183 | 183 | Handles saving and restoring a shelved state. Ensures that different |
|
184 | 184 | versions of a shelved state are possible and handles them appropriately. |
|
185 | 185 | """ |
|
186 | 186 | |
|
187 | 187 | _version = 2 |
|
188 | 188 | _filename = b'shelvedstate' |
|
189 | 189 | _keep = b'keep' |
|
190 | 190 | _nokeep = b'nokeep' |
|
191 | 191 | # colon is essential to differentiate from a real bookmark name |
|
192 | 192 | _noactivebook = b':no-active-bookmark' |
|
193 | 193 | _interactive = b'interactive' |
|
194 | 194 | |
|
195 | 195 | @classmethod |
|
196 | 196 | def _verifyandtransform(cls, d): |
|
197 | 197 | """Some basic shelvestate syntactic verification and transformation""" |
|
198 | 198 | try: |
|
199 | 199 | d[b'originalwctx'] = nodemod.bin(d[b'originalwctx']) |
|
200 | 200 | d[b'pendingctx'] = nodemod.bin(d[b'pendingctx']) |
|
201 | 201 | d[b'parents'] = [nodemod.bin(h) for h in d[b'parents'].split(b' ')] |
|
202 | 202 | d[b'nodestoremove'] = [ |
|
203 | 203 | nodemod.bin(h) for h in d[b'nodestoremove'].split(b' ') |
|
204 | 204 | ] |
|
205 | 205 | except (ValueError, TypeError, KeyError) as err: |
|
206 | 206 | raise error.CorruptedState(pycompat.bytestr(err)) |
|
207 | 207 | |
|
208 | 208 | @classmethod |
|
209 | 209 | def _getversion(cls, repo): |
|
210 | 210 | """Read version information from shelvestate file""" |
|
211 | 211 | fp = repo.vfs(cls._filename) |
|
212 | 212 | try: |
|
213 | 213 | version = int(fp.readline().strip()) |
|
214 | 214 | except ValueError as err: |
|
215 | 215 | raise error.CorruptedState(pycompat.bytestr(err)) |
|
216 | 216 | finally: |
|
217 | 217 | fp.close() |
|
218 | 218 | return version |
|
219 | 219 | |
|
220 | 220 | @classmethod |
|
221 | 221 | def _readold(cls, repo): |
|
222 | 222 | """Read the old position-based version of a shelvestate file""" |
|
223 | 223 | # Order is important, because old shelvestate file uses it |
|
224 | 224 | # to determine values of fields (e.g. name is on the second line,
|
225 | 225 | # originalwctx is on the third and so forth). Please do not change. |
|
226 | 226 | keys = [ |
|
227 | 227 | b'version', |
|
228 | 228 | b'name', |
|
229 | 229 | b'originalwctx', |
|
230 | 230 | b'pendingctx', |
|
231 | 231 | b'parents', |
|
232 | 232 | b'nodestoremove', |
|
233 | 233 | b'branchtorestore', |
|
234 | 234 | b'keep', |
|
235 | 235 | b'activebook', |
|
236 | 236 | ] |
|
237 | 237 | # this is executed only rarely, so it is not a big deal
|
238 | 238 | # that we open this file twice |
|
239 | 239 | fp = repo.vfs(cls._filename) |
|
240 | 240 | d = {} |
|
241 | 241 | try: |
|
242 | 242 | for key in keys: |
|
243 | 243 | d[key] = fp.readline().strip() |
|
244 | 244 | finally: |
|
245 | 245 | fp.close() |
|
246 | 246 | return d |
|
247 | 247 | |
|
248 | 248 | @classmethod |
|
249 | 249 | def load(cls, repo): |
|
250 | 250 | version = cls._getversion(repo) |
|
251 | 251 | if version < cls._version: |
|
252 | 252 | d = cls._readold(repo) |
|
253 | 253 | elif version == cls._version: |
|
254 | 254 | d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename).read( |
|
255 | 255 | firstlinenonkeyval=True |
|
256 | 256 | ) |
|
257 | 257 | else: |
|
258 | 258 | raise error.Abort( |
|
259 | 259 | _( |
|
260 | 260 | b'this version of shelve is incompatible ' |
|
261 | 261 | b'with the version used in this repo' |
|
262 | 262 | ) |
|
263 | 263 | ) |
|
264 | 264 | |
|
265 | 265 | cls._verifyandtransform(d) |
|
266 | 266 | try: |
|
267 | 267 | obj = cls() |
|
268 | 268 | obj.name = d[b'name'] |
|
269 | 269 | obj.wctx = repo[d[b'originalwctx']] |
|
270 | 270 | obj.pendingctx = repo[d[b'pendingctx']] |
|
271 | 271 | obj.parents = d[b'parents'] |
|
272 | 272 | obj.nodestoremove = d[b'nodestoremove'] |
|
273 | 273 | obj.branchtorestore = d.get(b'branchtorestore', b'') |
|
274 | 274 | obj.keep = d.get(b'keep') == cls._keep |
|
275 | 275 | obj.activebookmark = b'' |
|
276 | 276 | if d.get(b'activebook', b'') != cls._noactivebook: |
|
277 | 277 | obj.activebookmark = d.get(b'activebook', b'') |
|
278 | 278 | obj.interactive = d.get(b'interactive') == cls._interactive |
|
279 | 279 | except (error.RepoLookupError, KeyError) as err: |
|
280 | 280 | raise error.CorruptedState(pycompat.bytestr(err)) |
|
281 | 281 | |
|
282 | 282 | return obj |
|
283 | 283 | |
|
284 | 284 | @classmethod |
|
285 | 285 | def save( |
|
286 | 286 | cls, |
|
287 | 287 | repo, |
|
288 | 288 | name, |
|
289 | 289 | originalwctx, |
|
290 | 290 | pendingctx, |
|
291 | 291 | nodestoremove, |
|
292 | 292 | branchtorestore, |
|
293 | 293 | keep=False, |
|
294 | 294 | activebook=b'', |
|
295 | 295 | interactive=False, |
|
296 | 296 | ): |
|
297 | 297 | info = { |
|
298 | 298 | b"name": name, |
|
299 | 299 | b"originalwctx": nodemod.hex(originalwctx.node()), |
|
300 | 300 | b"pendingctx": nodemod.hex(pendingctx.node()), |
|
301 | 301 | b"parents": b' '.join( |
|
302 | 302 | [nodemod.hex(p) for p in repo.dirstate.parents()] |
|
303 | 303 | ), |
|
304 | 304 | b"nodestoremove": b' '.join( |
|
305 | 305 | [nodemod.hex(n) for n in nodestoremove] |
|
306 | 306 | ), |
|
307 | 307 | b"branchtorestore": branchtorestore, |
|
308 | 308 | b"keep": cls._keep if keep else cls._nokeep, |
|
309 | 309 | b"activebook": activebook or cls._noactivebook, |
|
310 | 310 | } |
|
311 | 311 | if interactive: |
|
312 | 312 | info[b'interactive'] = cls._interactive |
|
313 | 313 | scmutil.simplekeyvaluefile(repo.vfs, cls._filename).write( |
|
314 | 314 | info, firstline=(b"%d" % cls._version) |
|
315 | 315 | ) |
|
316 | 316 | |
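For orientation, a sketch of what a version-2 state file written by ``save()`` could look like on disk: ``simplekeyvaluefile`` puts the bare version on the first line and ``key=value`` pairs after it. The values and line ordering below are hypothetical, and nodes are shortened for display::

    2
    activebook=:no-active-bookmark
    branchtorestore=
    keep=nokeep
    name=default-01
    nodestoremove=d2ae7f538514 d06d5cbf2326
    originalwctx=d2ae7f538514
    parents=d2ae7f538514 000000000000
    pendingctx=d06d5cbf2326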
|
317 | 317 | @classmethod |
|
318 | 318 | def clear(cls, repo): |
|
319 | 319 | repo.vfs.unlinkpath(cls._filename, ignoremissing=True) |
|
320 | 320 | |
|
321 | 321 | |
|
322 | 322 | def cleanupoldbackups(repo): |
|
323 | 323 | vfs = vfsmod.vfs(repo.vfs.join(backupdir)) |
|
324 | 324 | maxbackups = repo.ui.configint(b'shelve', b'maxbackups') |
|
325 | 325 | hgfiles = [f for f in vfs.listdir() if f.endswith(b'.' + patchextension)] |
|
326 | 326 | hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles]) |
|
327 | 327 | if maxbackups > 0 and maxbackups < len(hgfiles): |
|
328 | 328 | bordermtime = hgfiles[-maxbackups][0] |
|
329 | 329 | else: |
|
330 | 330 | bordermtime = None |
|
331 | 331 | for mtime, f in hgfiles[: len(hgfiles) - maxbackups]: |
|
332 | 332 | if mtime == bordermtime: |
|
333 | 333 | # keep it, because timestamp can't decide exact order of backups |
|
334 | 334 | continue |
|
335 | 335 | base = f[: -(1 + len(patchextension))] |
|
336 | 336 | for ext in shelvefileextensions: |
|
337 | 337 | vfs.tryunlink(base + b'.' + ext) |
|
338 | 338 | |
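The ``bordermtime`` rule above merits a worked example: backups that tie with the oldest kept backup's mtime survive the purge, because an equal timestamp cannot prove they are older. A self-contained sketch with made-up data::

    # (mtime, name) pairs, already sorted by mtime; maxbackups == 2
    hgfiles = [(100, b'a.patch'), (200, b'b.patch'),
               (200, b'c.patch'), (300, b'd.patch')]
    maxbackups = 2
    bordermtime = hgfiles[-maxbackups][0]              # 200
    doomed = [f for mtime, f in hgfiles[:len(hgfiles) - maxbackups]
              if mtime != bordermtime]
    # doomed == [b'a.patch']: b'b.patch' falls inside the slice but is
    # spared by the tie at mtime 200, so three backups remain.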
|
339 | 339 | |
|
340 | 340 | def _backupactivebookmark(repo): |
|
341 | 341 | activebookmark = repo._activebookmark |
|
342 | 342 | if activebookmark: |
|
343 | 343 | bookmarks.deactivate(repo) |
|
344 | 344 | return activebookmark |
|
345 | 345 | |
|
346 | 346 | |
|
347 | 347 | def _restoreactivebookmark(repo, mark): |
|
348 | 348 | if mark: |
|
349 | 349 | bookmarks.activate(repo, mark) |
|
350 | 350 | |
|
351 | 351 | |
|
352 | 352 | def _aborttransaction(repo, tr): |
|
353 | 353 | '''Abort current transaction for shelve/unshelve, but keep dirstate |
|
354 | 354 | ''' |
|
355 | 355 | dirstatebackupname = b'dirstate.shelve' |
|
356 | 356 | repo.dirstate.savebackup(tr, dirstatebackupname) |
|
357 | 357 | tr.abort() |
|
358 | 358 | repo.dirstate.restorebackup(None, dirstatebackupname) |
|
359 | 359 | |
|
360 | 360 | |
|
361 | 361 | def getshelvename(repo, parent, opts): |
|
362 | 362 | """Decide on the name this shelve is going to have""" |
|
363 | 363 | |
|
364 | 364 | def gennames(): |
|
365 | 365 | yield label |
|
366 | 366 | for i in itertools.count(1): |
|
367 | 367 | yield b'%s-%02d' % (label, i) |
|
368 | 368 | |
|
369 | 369 | name = opts.get(b'name') |
|
370 | 370 | label = repo._activebookmark or parent.branch() or b'default' |
|
371 | 371 | # slashes aren't allowed in filenames, therefore we replace them
|
372 | 372 | label = label.replace(b'/', b'_') |
|
373 | 373 | label = label.replace(b'\\', b'_') |
|
374 | 374 | # filenames must not start with '.', as that would hide the file
|
375 | 375 | if label.startswith(b'.'): |
|
376 | 376 | label = label.replace(b'.', b'_', 1) |
|
377 | 377 | |
|
378 | 378 | if name: |
|
379 | 379 | if shelvedfile(repo, name, patchextension).exists(): |
|
380 | 380 | e = _(b"a shelved change named '%s' already exists") % name |
|
381 | 381 | raise error.Abort(e) |
|
382 | 382 | |
|
383 | 383 | # ensure we are not creating a subdirectory or a hidden file |
|
384 | 384 | if b'/' in name or b'\\' in name: |
|
385 | 385 | raise error.Abort( |
|
386 | 386 | _(b'shelved change names can not contain slashes') |
|
387 | 387 | ) |
|
388 | 388 | if name.startswith(b'.'): |
|
389 | 389 | raise error.Abort(_(b"shelved change names can not start with '.'")) |
|
390 | 390 | |
|
391 | 391 | else: |
|
392 | 392 | for n in gennames(): |
|
393 | 393 | if not shelvedfile(repo, n, patchextension).exists(): |
|
394 | 394 | name = n |
|
395 | 395 | break |
|
396 | 396 | |
|
397 | 397 | return name |
|
398 | 398 | |
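A short worked trace of the naming logic above, under a hypothetical repository state::

    # Active bookmark b'feature/x', no --name given, and shelves
    # 'feature_x' and 'feature_x-01' already on disk:
    #   label sanitized:   b'feature/x' -> b'feature_x'
    #   gennames() yields: b'feature_x', b'feature_x-01', b'feature_x-02'
    # The first candidate without an existing .patch file wins, so the
    # new shelve is named b'feature_x-02'.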
|
399 | 399 | |
|
400 | 400 | def mutableancestors(ctx): |
|
401 | 401 | """return all mutable ancestors for ctx (included) |
|
402 | 402 | |
|
403 | 403 | Much faster than the revset ancestors(ctx) & draft()""" |
|
404 | 404 | seen = {nodemod.nullrev} |
|
405 | 405 | visit = collections.deque() |
|
406 | 406 | visit.append(ctx) |
|
407 | 407 | while visit: |
|
408 | 408 | ctx = visit.popleft() |
|
409 | 409 | yield ctx.node() |
|
410 | 410 | for parent in ctx.parents(): |
|
411 | 411 | rev = parent.rev() |
|
412 | 412 | if rev not in seen: |
|
413 | 413 | seen.add(rev) |
|
414 | 414 | if parent.mutable(): |
|
415 | 415 | visit.append(parent) |
|
416 | 416 | |
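As a cross-check on the docstring's claim, the generator above matches, in spirit, this slower revset-based sketch (``repo`` and ``ctx`` assumed; ``not public()`` mirrors the ``mutable()`` test rather than the stricter ``draft()``)::

    # Roughly: ancestors of ctx, ctx included, that are still mutable.
    nodes = [
        repo.changelog.node(r)
        for r in repo.revs(b'ancestors(%d) and not public()', ctx.rev())
    ]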
|
417 | 417 | |
|
418 | 418 | def getcommitfunc(extra, interactive, editor=False): |
|
419 | 419 | def commitfunc(ui, repo, message, match, opts): |
|
420 | 420 | hasmq = util.safehasattr(repo, b'mq') |
|
421 | 421 | if hasmq: |
|
422 | 422 | saved, repo.mq.checkapplied = repo.mq.checkapplied, False |
|
423 | 423 | |
|
424 | 424 | targetphase = phases.internal |
|
425 | 425 | if not phases.supportinternal(repo): |
|
426 | 426 | targetphase = phases.secret |
|
427 | 427 | overrides = {(b'phases', b'new-commit'): targetphase} |
|
428 | 428 | try: |
|
429 | 429 | editor_ = False |
|
430 | 430 | if editor: |
|
431 | 431 | editor_ = cmdutil.getcommiteditor( |
|
432 | 432 | editform=b'shelve.shelve', **pycompat.strkwargs(opts) |
|
433 | 433 | ) |
|
434 | 434 | with repo.ui.configoverride(overrides): |
|
435 | 435 | return repo.commit( |
|
436 | 436 | message, |
|
437 | 437 | shelveuser, |
|
438 | 438 | opts.get(b'date'), |
|
439 | 439 | match, |
|
440 | 440 | editor=editor_, |
|
441 | 441 | extra=extra, |
|
442 | 442 | ) |
|
443 | 443 | finally: |
|
444 | 444 | if hasmq: |
|
445 | 445 | repo.mq.checkapplied = saved |
|
446 | 446 | |
|
447 | 447 | def interactivecommitfunc(ui, repo, *pats, **opts): |
|
448 | 448 | opts = pycompat.byteskwargs(opts) |
|
449 | 449 | match = scmutil.match(repo[b'.'], pats, {}) |
|
450 | 450 | message = opts[b'message'] |
|
451 | 451 | return commitfunc(ui, repo, message, match, opts) |
|
452 | 452 | |
|
453 | 453 | return interactivecommitfunc if interactive else commitfunc |
|
454 | 454 | |
|
455 | 455 | |
|
456 | 456 | def _nothingtoshelvemessaging(ui, repo, pats, opts): |
|
457 | 457 | stat = repo.status(match=scmutil.match(repo[None], pats, opts)) |
|
458 | 458 | if stat.deleted: |
|
459 | 459 | ui.status( |
|
460 | 460 | _(b"nothing changed (%d missing files, see 'hg status')\n") |
|
461 | 461 | % len(stat.deleted) |
|
462 | 462 | ) |
|
463 | 463 | else: |
|
464 | 464 | ui.status(_(b"nothing changed\n")) |
|
465 | 465 | |
|
466 | 466 | |
|
467 | 467 | def _shelvecreatedcommit(repo, node, name, match): |
|
468 | 468 | info = {b'node': nodemod.hex(node)} |
|
469 | 469 | shelvedfile(repo, name, b'shelve').writeinfo(info) |
|
470 | 470 | bases = list(mutableancestors(repo[node])) |
|
471 | 471 | shelvedfile(repo, name, b'hg').writebundle(bases, node) |
|
472 | 472 | with shelvedfile(repo, name, patchextension).opener(b'wb') as fp: |
|
473 | 473 | cmdutil.exportfile( |
|
474 | 474 | repo, [node], fp, opts=mdiff.diffopts(git=True), match=match |
|
475 | 475 | ) |
|
476 | 476 | |
|
477 | 477 | |
|
478 | 478 | def _includeunknownfiles(repo, pats, opts, extra): |
|
479 | 479 | s = repo.status(match=scmutil.match(repo[None], pats, opts), unknown=True) |
|
480 | 480 | if s.unknown: |
|
481 | 481 | extra[b'shelve_unknown'] = b'\0'.join(s.unknown) |
|
482 | 482 | repo[None].add(s.unknown) |
|
483 | 483 | |
|
484 | 484 | |
|
485 | 485 | def _finishshelve(repo, tr): |
|
486 | 486 | if phases.supportinternal(repo): |
|
487 | 487 | tr.close() |
|
488 | 488 | else: |
|
489 | 489 | _aborttransaction(repo, tr) |
|
490 | 490 | |
|
491 | 491 | |
|
492 | 492 | def createcmd(ui, repo, pats, opts): |
|
493 | 493 | """subcommand that creates a new shelve""" |
|
494 | 494 | with repo.wlock(): |
|
495 | 495 | cmdutil.checkunfinished(repo) |
|
496 | 496 | return _docreatecmd(ui, repo, pats, opts) |
|
497 | 497 | |
|
498 | 498 | |
|
499 | 499 | def _docreatecmd(ui, repo, pats, opts): |
|
500 | 500 | wctx = repo[None] |
|
501 | 501 | parents = wctx.parents() |
|
502 | 502 | parent = parents[0] |
|
503 | 503 | origbranch = wctx.branch() |
|
504 | 504 | |
|
505 | 505 | if parent.node() != nodemod.nullid: |
|
506 | 506 | desc = b"changes to: %s" % parent.description().split(b'\n', 1)[0] |
|
507 | 507 | else: |
|
508 | 508 | desc = b'(changes in empty repository)' |
|
509 | 509 | |
|
510 | 510 | if not opts.get(b'message'): |
|
511 | 511 | opts[b'message'] = desc |
|
512 | 512 | |
|
513 | 513 | lock = tr = activebookmark = None |
|
514 | 514 | try: |
|
515 | 515 | lock = repo.lock() |
|
516 | 516 | |
|
517 | 517 | # use an uncommitted transaction to generate the bundle to avoid |
|
518 | 518 | # pull races. ensure we don't print the abort message to stderr. |
|
519 | 519 | tr = repo.transaction(b'shelve', report=lambda x: None) |
|
520 | 520 | |
|
521 | 521 | interactive = opts.get(b'interactive', False) |
|
522 | 522 | includeunknown = opts.get(b'unknown', False) and not opts.get( |
|
523 | 523 | b'addremove', False |
|
524 | 524 | ) |
|
525 | 525 | |
|
526 | 526 | name = getshelvename(repo, parent, opts) |
|
527 | 527 | activebookmark = _backupactivebookmark(repo) |
|
528 | 528 | extra = {b'internal': b'shelve'} |
|
529 | 529 | if includeunknown: |
|
530 | 530 | _includeunknownfiles(repo, pats, opts, extra) |
|
531 | 531 | |
|
532 | 532 | if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts): |
|
533 | 533 | # In non-bare shelve we don't store newly created branch |
|
534 | 534 | # at bundled commit |
|
535 | 535 | repo.dirstate.setbranch(repo[b'.'].branch()) |
|
536 | 536 | |
|
537 | 537 | commitfunc = getcommitfunc(extra, interactive, editor=True) |
|
538 | 538 | if not interactive: |
|
539 | 539 | node = cmdutil.commit(ui, repo, commitfunc, pats, opts) |
|
540 | 540 | else: |
|
541 | 541 | node = cmdutil.dorecord( |
|
542 | 542 | ui, |
|
543 | 543 | repo, |
|
544 | 544 | commitfunc, |
|
545 | 545 | None, |
|
546 | 546 | False, |
|
547 | 547 | cmdutil.recordfilter, |
|
548 | 548 | *pats, |
|
549 | 549 | **pycompat.strkwargs(opts) |
|
550 | 550 | ) |
|
551 | 551 | if not node: |
|
552 | 552 | _nothingtoshelvemessaging(ui, repo, pats, opts) |
|
553 | 553 | return 1 |
|
554 | 554 | |
|
555 | 555 | # Create a matcher so that prefetch doesn't attempt to fetch |
|
556 | 556 | # the entire repository pointlessly, and as an optimisation |
|
557 | 557 | # for movedirstate, if needed. |
|
558 | 558 | match = scmutil.matchfiles(repo, repo[node].files()) |
|
559 | 559 | _shelvecreatedcommit(repo, node, name, match) |
|
560 | 560 | |
|
561 | 561 | ui.status(_(b'shelved as %s\n') % name) |
|
562 | 562 | if opts[b'keep']: |
|
563 | 563 | with repo.dirstate.parentchange(): |
|
564 | 564 | scmutil.movedirstate(repo, parent, match) |
|
565 | 565 | else: |
|
566 | 566 | hg.update(repo, parent.node()) |
|
567 | 567 | if origbranch != repo[b'.'].branch() and not _isbareshelve(pats, opts): |
|
568 | 568 | repo.dirstate.setbranch(origbranch) |
|
569 | 569 | |
|
570 | 570 | _finishshelve(repo, tr) |
|
571 | 571 | finally: |
|
572 | 572 | _restoreactivebookmark(repo, activebookmark) |
|
573 | 573 | lockmod.release(tr, lock) |
|
574 | 574 | |
|
575 | 575 | |
|
576 | 576 | def _isbareshelve(pats, opts): |
|
577 | 577 | return ( |
|
578 | 578 | not pats |
|
579 | 579 | and not opts.get(b'interactive', False) |
|
580 | 580 | and not opts.get(b'include', False) |
|
581 | 581 | and not opts.get(b'exclude', False) |
|
582 | 582 | ) |
|
583 | 583 | |
|
584 | 584 | |
|
585 | 585 | def _iswctxonnewbranch(repo): |
|
586 | 586 | return repo[None].branch() != repo[b'.'].branch() |
|
587 | 587 | |
|
588 | 588 | |
|
589 | 589 | def cleanupcmd(ui, repo): |
|
590 | 590 | """subcommand that deletes all shelves""" |
|
591 | 591 | |
|
592 | 592 | with repo.wlock(): |
|
593 | 593 | for (name, _type) in repo.vfs.readdir(shelvedir): |
|
594 | 594 | suffix = name.rsplit(b'.', 1)[-1] |
|
595 | 595 | if suffix in shelvefileextensions: |
|
596 | 596 | shelvedfile(repo, name).movetobackup() |
|
597 | 597 | cleanupoldbackups(repo) |
|
598 | 598 | |
|
599 | 599 | |
|
600 | 600 | def deletecmd(ui, repo, pats): |
|
601 | 601 | """subcommand that deletes a specific shelve""" |
|
602 | 602 | if not pats: |
|
603 | 603 | raise error.Abort(_(b'no shelved changes specified!')) |
|
604 | 604 | with repo.wlock(): |
|
605 | 605 | for name in pats: |
|
606 | 606 | try: |
|
607 | 607 | for suffix in shelvefileextensions: |
|
608 | 608 | shfile = shelvedfile(repo, name, suffix) |
|
609 | 609 | # patch file is necessary, as it should |
|
610 | 610 | # be present for any kind of shelve, |
|
611 | 611 | # but the .hg file is optional, as in the future we

612 | 612 | # will add an obsolescence-based shelve which does not create a
|
613 | 613 | # bundle |
|
614 | 614 | if shfile.exists() or suffix == patchextension: |
|
615 | 615 | shfile.movetobackup() |
|
616 | 616 | except OSError as err: |
|
617 | 617 | if err.errno != errno.ENOENT: |
|
618 | 618 | raise |
|
619 | 619 | raise error.Abort(_(b"shelved change '%s' not found") % name) |
|
620 | 620 | cleanupoldbackups(repo) |
|
621 | 621 | |
|
622 | 622 | |
|
623 | 623 | def listshelves(repo): |
|
624 | 624 | """return all shelves in repo as list of (time, filename)""" |
|
625 | 625 | try: |
|
626 | 626 | names = repo.vfs.readdir(shelvedir) |
|
627 | 627 | except OSError as err: |
|
628 | 628 | if err.errno != errno.ENOENT: |
|
629 | 629 | raise |
|
630 | 630 | return [] |
|
631 | 631 | info = [] |
|
632 | 632 | for (name, _type) in names: |
|
633 | 633 | pfx, sfx = name.rsplit(b'.', 1) |
|
634 | 634 | if not pfx or sfx != patchextension: |
|
635 | 635 | continue |
|
636 | 636 | st = shelvedfile(repo, name).stat() |
|
637 | 637 | info.append((st[stat.ST_MTIME], shelvedfile(repo, pfx).filename())) |
|
638 | 638 | return sorted(info, reverse=True) |
|
639 | 639 | |
|
640 | 640 | |
|
641 | 641 | def listcmd(ui, repo, pats, opts): |
|
642 | 642 | """subcommand that displays the list of shelves""" |
|
643 | 643 | pats = set(pats) |
|
644 | 644 | width = 80 |
|
645 | 645 | if not ui.plain(): |
|
646 | 646 | width = ui.termwidth() |
|
647 | 647 | namelabel = b'shelve.newest' |
|
648 | 648 | ui.pager(b'shelve') |
|
649 | 649 | for mtime, name in listshelves(repo): |
|
650 | 650 | sname = util.split(name)[1] |
|
651 | 651 | if pats and sname not in pats: |
|
652 | 652 | continue |
|
653 | 653 | ui.write(sname, label=namelabel) |
|
654 | 654 | namelabel = b'shelve.name' |
|
655 | 655 | if ui.quiet: |
|
656 | 656 | ui.write(b'\n') |
|
657 | 657 | continue |
|
658 | 658 | ui.write(b' ' * (16 - len(sname))) |
|
659 | 659 | used = 16 |
|
660 | 660 | date = dateutil.makedate(mtime) |
|
661 | 661 | age = b'(%s)' % templatefilters.age(date, abbrev=True) |
|
662 | 662 | ui.write(age, label=b'shelve.age') |
|
663 | 663 | ui.write(b' ' * (12 - len(age))) |
|
664 | 664 | used += 12 |
|
665 | 665 | with open(name + b'.' + patchextension, b'rb') as fp: |
|
666 | 666 | while True: |
|
667 | 667 | line = fp.readline() |
|
668 | 668 | if not line: |
|
669 | 669 | break |
|
670 | 670 | if not line.startswith(b'#'): |
|
671 | 671 | desc = line.rstrip() |
|
672 | 672 | if ui.formatted(): |
|
673 | 673 | desc = stringutil.ellipsis(desc, width - used) |
|
674 | 674 | ui.write(desc) |
|
675 | 675 | break |
|
676 | 676 | ui.write(b'\n') |
|
677 | 677 | if not (opts[b'patch'] or opts[b'stat']): |
|
678 | 678 | continue |
|
679 | 679 | difflines = fp.readlines() |
|
680 | 680 | if opts[b'patch']: |
|
681 | 681 | for chunk, label in patch.difflabel(iter, difflines): |
|
682 | 682 | ui.write(chunk, label=label) |
|
683 | 683 | if opts[b'stat']: |
|
684 | 684 | for chunk, label in patch.diffstatui(difflines, width=width): |
|
685 | 685 | ui.write(chunk, label=label) |
|
686 | 686 | |
|
687 | 687 | |
|
688 | 688 | def patchcmds(ui, repo, pats, opts): |
|
689 | 689 | """subcommand that displays shelves""" |
|
690 | 690 | if len(pats) == 0: |
|
691 | 691 | shelves = listshelves(repo) |
|
692 | 692 | if not shelves: |
|
693 | 693 | raise error.Abort(_(b"there are no shelves to show")) |
|
694 | 694 | mtime, name = shelves[0] |
|
695 | 695 | sname = util.split(name)[1] |
|
696 | 696 | pats = [sname] |
|
697 | 697 | |
|
698 | 698 | for shelfname in pats: |
|
699 | 699 | if not shelvedfile(repo, shelfname, patchextension).exists(): |
|
700 | 700 | raise error.Abort(_(b"cannot find shelf %s") % shelfname) |
|
701 | 701 | |
|
702 | 702 | listcmd(ui, repo, pats, opts) |
|
703 | 703 | |
|
704 | 704 | |
|
705 | 705 | def checkparents(repo, state): |
|
706 | 706 | """check parent while resuming an unshelve""" |
|
707 | 707 | if state.parents != repo.dirstate.parents(): |
|
708 | 708 | raise error.Abort( |
|
709 | 709 | _(b'working directory parents do not match unshelve state') |
|
710 | 710 | ) |
|
711 | 711 | |
|
712 | 712 | |
|
713 | 713 | def _loadshelvedstate(ui, repo, opts): |
|
714 | 714 | try: |
|
715 | 715 | state = shelvedstate.load(repo) |
|
716 | 716 | if opts.get(b'keep') is None: |
|
717 | 717 | opts[b'keep'] = state.keep |
|
718 | 718 | except IOError as err: |
|
719 | 719 | if err.errno != errno.ENOENT: |
|
720 | 720 | raise |
|
721 | 721 | cmdutil.wrongtooltocontinue(repo, _(b'unshelve')) |
|
722 | 722 | except error.CorruptedState as err: |
|
723 | 723 | ui.debug(pycompat.bytestr(err) + b'\n') |
|
724 | 724 | if opts.get(b'continue'): |
|
725 | 725 | msg = _(b'corrupted shelved state file') |
|
726 | 726 | hint = _( |
|
727 | 727 | b'please run hg unshelve --abort to abort unshelve ' |
|
728 | 728 | b'operation' |
|
729 | 729 | ) |
|
730 | 730 | raise error.Abort(msg, hint=hint) |
|
731 | 731 | elif opts.get(b'abort'): |
|
732 | 732 | shelvedstate.clear(repo) |
|
733 | 733 | raise error.Abort( |
|
734 | 734 | _( |
|
735 | 735 | b'could not read shelved state file, your ' |
|
736 | 736 | b'working copy may be in an unexpected state\n' |
|
737 | 737 | b'please update to some commit\n' |
|
738 | 738 | ) |
|
739 | 739 | ) |
|
740 | 740 | return state |
|
741 | 741 | |
|
742 | 742 | |
|
743 | 743 | def unshelveabort(ui, repo, state): |
|
744 | 744 | """subcommand that abort an in-progress unshelve""" |
|
745 | 745 | with repo.lock(): |
|
746 | 746 | try: |
|
747 | 747 | checkparents(repo, state) |
|
748 | 748 | |
|
749 | 749 | merge.clean_update(state.pendingctx) |
|
750 | 750 | if state.activebookmark and state.activebookmark in repo._bookmarks: |
|
751 | 751 | bookmarks.activate(repo, state.activebookmark) |
|
752 | 752 | mergefiles(ui, repo, state.wctx, state.pendingctx) |
|
753 | 753 | if not phases.supportinternal(repo): |
|
754 | 754 | repair.strip( |
|
755 | 755 | ui, repo, state.nodestoremove, backup=False, topic=b'shelve' |
|
756 | 756 | ) |
|
757 | 757 | finally: |
|
758 | 758 | shelvedstate.clear(repo) |
|
759 | 759 | ui.warn(_(b"unshelve of '%s' aborted\n") % state.name) |
|
760 | 760 | |
|
761 | 761 | |
|
762 | 762 | def hgabortunshelve(ui, repo): |
|
763 | 763 | """logic to abort unshelve using 'hg abort""" |
|
764 | 764 | with repo.wlock(): |
|
765 | 765 | state = _loadshelvedstate(ui, repo, {b'abort': True}) |
|
766 | 766 | return unshelveabort(ui, repo, state) |
|
767 | 767 | |
|
768 | 768 | |
|
769 | 769 | def mergefiles(ui, repo, wctx, shelvectx): |
|
770 | 770 | """updates to wctx and merges the changes from shelvectx into the |
|
771 | 771 | dirstate.""" |
|
772 | 772 | with ui.configoverride({(b'ui', b'quiet'): True}): |
|
773 | 773 | hg.update(repo, wctx.node()) |
|
774 | 774 | ui.pushbuffer(True) |
|
775 | 775 | cmdutil.revert(ui, repo, shelvectx) |
|
776 | 776 | ui.popbuffer() |
|
777 | 777 | |
|
778 | 778 | |
|
779 | 779 | def restorebranch(ui, repo, branchtorestore): |
|
780 | 780 | if branchtorestore and branchtorestore != repo.dirstate.branch(): |
|
781 | 781 | repo.dirstate.setbranch(branchtorestore) |
|
782 | 782 | ui.status( |
|
783 | 783 | _(b'marked working directory as branch %s\n') % branchtorestore |
|
784 | 784 | ) |
|
785 | 785 | |
|
786 | 786 | |
|
787 | 787 | def unshelvecleanup(ui, repo, name, opts): |
|
788 | 788 | """remove related files after an unshelve""" |
|
789 | 789 | if not opts.get(b'keep'): |
|
790 | 790 | for filetype in shelvefileextensions: |
|
791 | 791 | shfile = shelvedfile(repo, name, filetype) |
|
792 | 792 | if shfile.exists(): |
|
793 | 793 | shfile.movetobackup() |
|
794 | 794 | cleanupoldbackups(repo) |
|
795 | 795 | |
|
796 | 796 | |
|
797 | 797 | def unshelvecontinue(ui, repo, state, opts): |
|
798 | 798 | """subcommand to continue an in-progress unshelve""" |
|
799 | 799 | # We're finishing off a merge. First parent is our original |
|
800 | 800 | # parent, second is the temporary "fake" commit we're unshelving. |
|
801 | 801 | interactive = state.interactive |
|
802 | 802 | basename = state.name |
|
803 | 803 | with repo.lock(): |
|
804 | 804 | checkparents(repo, state) |
|
805 | 805 | ms = mergestatemod.mergestate.read(repo) |
|
806 | 806 | if list(ms.unresolved()): |
|
807 | 807 | raise error.Abort( |
|
808 | 808 | _(b"unresolved conflicts, can't continue"), |
|
809 | 809 | hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"), |
|
810 | 810 | ) |
|
811 | 811 | |
|
812 | 812 | shelvectx = repo[state.parents[1]] |
|
813 | 813 | pendingctx = state.pendingctx |
|
814 | 814 | |
|
815 | 815 | with repo.dirstate.parentchange(): |
|
816 | 816 | repo.setparents(state.pendingctx.node(), nodemod.nullid) |
|
817 | 817 | repo.dirstate.write(repo.currenttransaction()) |
|
818 | 818 | |
|
819 | 819 | targetphase = phases.internal |
|
820 | 820 | if not phases.supportinternal(repo): |
|
821 | 821 | targetphase = phases.secret |
|
822 | 822 | overrides = {(b'phases', b'new-commit'): targetphase} |
|
823 | 823 | with repo.ui.configoverride(overrides, b'unshelve'): |
|
824 | 824 | with repo.dirstate.parentchange(): |
|
825 | 825 | repo.setparents(state.parents[0], nodemod.nullid) |
|
826 | 826 | newnode, ispartialunshelve = _createunshelvectx( |
|
827 | 827 | ui, repo, shelvectx, basename, interactive, opts |
|
828 | 828 | ) |
|
829 | 829 | |
|
830 | 830 | if newnode is None: |
|
831 | 831 | shelvectx = state.pendingctx |
|
832 | 832 | msg = _( |
|
833 | 833 | b'note: unshelved changes already existed ' |
|
834 | 834 | b'in the working copy\n' |
|
835 | 835 | ) |
|
836 | 836 | ui.status(msg) |
|
837 | 837 | else: |
|
838 | 838 | # only strip the shelvectx if we produced one |
|
839 | 839 | state.nodestoremove.append(newnode) |
|
840 | 840 | shelvectx = repo[newnode] |
|
841 | 841 | |
|
842 | hg.updaterepo(repo, pendingctx.node(), overwrite=False) | |
|
842 | merge.update(pendingctx) | |
|
843 | 843 | mergefiles(ui, repo, state.wctx, shelvectx) |
|
844 | 844 | restorebranch(ui, repo, state.branchtorestore) |
|
845 | 845 | |
|
846 | 846 | if not phases.supportinternal(repo): |
|
847 | 847 | repair.strip( |
|
848 | 848 | ui, repo, state.nodestoremove, backup=False, topic=b'shelve' |
|
849 | 849 | ) |
|
850 | 850 | shelvedstate.clear(repo) |
|
851 | 851 | if not ispartialunshelve: |
|
852 | 852 | unshelvecleanup(ui, repo, state.name, opts) |
|
853 | 853 | _restoreactivebookmark(repo, state.activebookmark) |
|
854 | 854 | ui.status(_(b"unshelve of '%s' complete\n") % state.name) |
|
855 | 855 | |
|
856 | 856 | |
|
857 | 857 | def hgcontinueunshelve(ui, repo): |
|
858 | 858 | """logic to resume unshelve using 'hg continue'""" |
|
859 | 859 | with repo.wlock(): |
|
860 | 860 | state = _loadshelvedstate(ui, repo, {b'continue': True}) |
|
861 | 861 | return unshelvecontinue(ui, repo, state, {b'keep': state.keep}) |
|
862 | 862 | |
|
863 | 863 | |
|
864 | 864 | def _commitworkingcopychanges(ui, repo, opts, tmpwctx): |
|
865 | 865 | """Temporarily commit working copy changes before moving unshelve commit""" |
|
866 | 866 | # Store pending changes in a commit and remember the added files in case a shelve
|
867 | 867 | # contains unknown files that are part of the pending change |
|
868 | 868 | s = repo.status() |
|
869 | 869 | addedbefore = frozenset(s.added) |
|
870 | 870 | if not (s.modified or s.added or s.removed): |
|
871 | 871 | return tmpwctx, addedbefore |
|
872 | 872 | ui.status( |
|
873 | 873 | _( |
|
874 | 874 | b"temporarily committing pending changes " |
|
875 | 875 | b"(restore with 'hg unshelve --abort')\n" |
|
876 | 876 | ) |
|
877 | 877 | ) |
|
878 | 878 | extra = {b'internal': b'shelve'} |
|
879 | 879 | commitfunc = getcommitfunc(extra=extra, interactive=False, editor=False) |
|
880 | 880 | tempopts = {} |
|
881 | 881 | tempopts[b'message'] = b"pending changes temporary commit" |
|
882 | 882 | tempopts[b'date'] = opts.get(b'date') |
|
883 | 883 | with ui.configoverride({(b'ui', b'quiet'): True}): |
|
884 | 884 | node = cmdutil.commit(ui, repo, commitfunc, [], tempopts) |
|
885 | 885 | tmpwctx = repo[node] |
|
886 | 886 | return tmpwctx, addedbefore |
|
887 | 887 | |
|
888 | 888 | |
|
889 | 889 | def _unshelverestorecommit(ui, repo, tr, basename): |
|
890 | 890 | """Recreate commit in the repository during the unshelve""" |
|
891 | 891 | repo = repo.unfiltered() |
|
892 | 892 | node = None |
|
893 | 893 | if shelvedfile(repo, basename, b'shelve').exists(): |
|
894 | 894 | node = shelvedfile(repo, basename, b'shelve').readinfo()[b'node'] |
|
895 | 895 | if node is None or node not in repo: |
|
896 | 896 | with ui.configoverride({(b'ui', b'quiet'): True}): |
|
897 | 897 | shelvectx = shelvedfile(repo, basename, b'hg').applybundle(tr) |
|
898 | 898 | # We might not strip the unbundled changeset, so we should keep track of |
|
899 | 899 | # the unshelve node in case we need to reuse it (e.g. unshelve --keep)
|
900 | 900 | if node is None: |
|
901 | 901 | info = {b'node': nodemod.hex(shelvectx.node())} |
|
902 | 902 | shelvedfile(repo, basename, b'shelve').writeinfo(info) |
|
903 | 903 | else: |
|
904 | 904 | shelvectx = repo[node] |
|
905 | 905 | |
|
906 | 906 | return repo, shelvectx |
|
907 | 907 | |
|
908 | 908 | |
|
909 | 909 | def _createunshelvectx(ui, repo, shelvectx, basename, interactive, opts): |
|
910 | 910 | """Handles the creation of unshelve commit and updates the shelve if it |
|
911 | 911 | was partially unshelved. |
|
912 | 912 | |
|
913 | 913 | If interactive is: |
|
914 | 914 | |
|
915 | 915 | * False: Commits all the changes in the working directory. |
|
916 | 916 | * True: Prompts the user to select changes to unshelve and commit them. |
|
917 | 917 | Update the shelve with remaining changes. |
|
918 | 918 | |
|
919 | 919 | Returns the node of the new commit formed and a bool indicating whether |
|
920 | 920 | the shelve was partially unshelved. Creates a commit ctx to unshelve
|
921 | 921 | interactively or non-interactively. |
|
922 | 922 | |
|
923 | 923 | In interactive mode the user might want to unshelve only some of the

924 | 924 | changes stored in the shelve. In that case we create two commits: one

925 | 925 | with the changes requested now, while the remainder is shelved again for later.

926 | 926 |

927 | 927 | Here, we return both the newnode which was created and a bool indicating

928 | 928 | whether the unshelve was partial or complete.
|
929 | 929 | """ |
|
930 | 930 | opts[b'message'] = shelvectx.description() |
|
931 | 931 | opts[b'interactive-unshelve'] = True |
|
932 | 932 | pats = [] |
|
933 | 933 | if not interactive: |
|
934 | 934 | newnode = repo.commit( |
|
935 | 935 | text=shelvectx.description(), |
|
936 | 936 | extra=shelvectx.extra(), |
|
937 | 937 | user=shelvectx.user(), |
|
938 | 938 | date=shelvectx.date(), |
|
939 | 939 | ) |
|
940 | 940 | return newnode, False |
|
941 | 941 | |
|
942 | 942 | commitfunc = getcommitfunc(shelvectx.extra(), interactive=True, editor=True) |
|
943 | 943 | newnode = cmdutil.dorecord( |
|
944 | 944 | ui, |
|
945 | 945 | repo, |
|
946 | 946 | commitfunc, |
|
947 | 947 | None, |
|
948 | 948 | False, |
|
949 | 949 | cmdutil.recordfilter, |
|
950 | 950 | *pats, |
|
951 | 951 | **pycompat.strkwargs(opts) |
|
952 | 952 | ) |
|
953 | 953 | snode = repo.commit( |
|
954 | 954 | text=shelvectx.description(), |
|
955 | 955 | extra=shelvectx.extra(), |
|
956 | 956 | user=shelvectx.user(), |
|
957 | 957 | ) |
|
958 | 958 | if snode: |
|
959 | 959 | m = scmutil.matchfiles(repo, repo[snode].files()) |
|
960 | 960 | _shelvecreatedcommit(repo, snode, basename, m) |
|
961 | 961 | |
|
962 | 962 | return newnode, bool(snode) |
|
963 | 963 | |
|
964 | 964 | |
|
965 | 965 | def _rebaserestoredcommit( |
|
966 | 966 | ui, |
|
967 | 967 | repo, |
|
968 | 968 | opts, |
|
969 | 969 | tr, |
|
970 | 970 | oldtiprev, |
|
971 | 971 | basename, |
|
972 | 972 | pctx, |
|
973 | 973 | tmpwctx, |
|
974 | 974 | shelvectx, |
|
975 | 975 | branchtorestore, |
|
976 | 976 | activebookmark, |
|
977 | 977 | ): |
|
978 | 978 | """Rebase restored commit from its original location to a destination""" |
|
979 | 979 | # If the shelve is not immediately on top of the commit |
|
980 | 980 | # we'll be merging with, rebase it to be on top. |
|
981 | 981 | interactive = opts.get(b'interactive') |
|
982 | 982 | if tmpwctx.node() == shelvectx.p1().node() and not interactive: |
|
983 | 983 | # We won't skip on interactive mode because, the user might want to |
|
984 | 984 | # unshelve certain changes only. |
|
985 | 985 | return shelvectx, False |
|
986 | 986 | |
|
987 | 987 | overrides = { |
|
988 | 988 | (b'ui', b'forcemerge'): opts.get(b'tool', b''), |
|
989 | 989 | (b'phases', b'new-commit'): phases.secret, |
|
990 | 990 | } |
|
991 | 991 | with repo.ui.configoverride(overrides, b'unshelve'): |
|
992 | 992 | ui.status(_(b'rebasing shelved changes\n')) |
|
993 | 993 | stats = merge.graft( |
|
994 | 994 | repo, |
|
995 | 995 | shelvectx, |
|
996 | 996 | labels=[b'working-copy', b'shelve'], |
|
997 | 997 | keepconflictparent=True, |
|
998 | 998 | ) |
|
999 | 999 | if stats.unresolvedcount: |
|
1000 | 1000 | tr.close() |
|
1001 | 1001 | |
|
1002 | 1002 | nodestoremove = [ |
|
1003 | 1003 | repo.changelog.node(rev) |
|
1004 | 1004 | for rev in pycompat.xrange(oldtiprev, len(repo)) |
|
1005 | 1005 | ] |
|
1006 | 1006 | shelvedstate.save( |
|
1007 | 1007 | repo, |
|
1008 | 1008 | basename, |
|
1009 | 1009 | pctx, |
|
1010 | 1010 | tmpwctx, |
|
1011 | 1011 | nodestoremove, |
|
1012 | 1012 | branchtorestore, |
|
1013 | 1013 | opts.get(b'keep'), |
|
1014 | 1014 | activebookmark, |
|
1015 | 1015 | interactive, |
|
1016 | 1016 | ) |
|
1017 | 1017 | raise error.ConflictResolutionRequired(b'unshelve') |
|
1018 | 1018 | |
|
1019 | 1019 | with repo.dirstate.parentchange(): |
|
1020 | 1020 | repo.setparents(tmpwctx.node(), nodemod.nullid) |
|
1021 | 1021 | newnode, ispartialunshelve = _createunshelvectx( |
|
1022 | 1022 | ui, repo, shelvectx, basename, interactive, opts |
|
1023 | 1023 | ) |
|
1024 | 1024 | |
|
1025 | 1025 | if newnode is None: |
|
1026 | 1026 | shelvectx = tmpwctx |
|
1027 | 1027 | msg = _( |
|
1028 | 1028 | b'note: unshelved changes already existed ' |
|
1029 | 1029 | b'in the working copy\n' |
|
1030 | 1030 | ) |
|
1031 | 1031 | ui.status(msg) |
|
1032 | 1032 | else: |
|
1033 | 1033 | shelvectx = repo[newnode] |
|
1034 | hg.updaterepo(repo, tmpwctx.node(), overwrite=False) | 

1034 | merge.update(tmpwctx) | 
|
1035 | 1035 | |
|
1036 | 1036 | return shelvectx, ispartialunshelve |
|
1037 | 1037 | |
|
1038 | 1038 | |
|
1039 | 1039 | def _forgetunknownfiles(repo, shelvectx, addedbefore): |
|
1040 | 1040 | # Forget any files that were unknown before the shelve, unknown before |
|
1041 | 1041 | # unshelve started, but are now added. |
|
1042 | 1042 | shelveunknown = shelvectx.extra().get(b'shelve_unknown') |
|
1043 | 1043 | if not shelveunknown: |
|
1044 | 1044 | return |
|
1045 | 1045 | shelveunknown = frozenset(shelveunknown.split(b'\0')) |
|
1046 | 1046 | addedafter = frozenset(repo.status().added) |
|
1047 | 1047 | toforget = (addedafter & shelveunknown) - addedbefore |
|
1048 | 1048 | repo[None].forget(toforget) |
|
1049 | 1049 | |
|
1050 | 1050 | |
|
1051 | 1051 | def _finishunshelve(repo, oldtiprev, tr, activebookmark): |
|
1052 | 1052 | _restoreactivebookmark(repo, activebookmark) |
|
1053 | 1053 | # Aborting the transaction will strip all the commits for us,
|
1054 | 1054 | # but it doesn't update the inmemory structures, so addchangegroup |
|
1055 | 1055 | # hooks still fire and try to operate on the missing commits. |
|
1056 | 1056 | # Clean up manually to prevent this. |
|
1057 | 1057 | repo.unfiltered().changelog.strip(oldtiprev, tr) |
|
1058 | 1058 | _aborttransaction(repo, tr) |
|
1059 | 1059 | |
|
1060 | 1060 | |
|
1061 | 1061 | def _checkunshelveuntrackedproblems(ui, repo, shelvectx): |
|
1062 | 1062 | """Check potential problems which may result from working |
|
1063 | 1063 | copy having untracked changes.""" |
|
1064 | 1064 | wcdeleted = set(repo.status().deleted) |
|
1065 | 1065 | shelvetouched = set(shelvectx.files()) |
|
1066 | 1066 | intersection = wcdeleted.intersection(shelvetouched) |
|
1067 | 1067 | if intersection: |
|
1068 | 1068 | m = _(b"shelved change touches missing files") |
|
1069 | 1069 | hint = _(b"run hg status to see which files are missing") |
|
1070 | 1070 | raise error.Abort(m, hint=hint) |
|
1071 | 1071 | |
|
1072 | 1072 | |
|
1073 | 1073 | def unshelvecmd(ui, repo, *shelved, **opts): |
|
1074 | 1074 | opts = pycompat.byteskwargs(opts) |
|
1075 | 1075 | abortf = opts.get(b'abort') |
|
1076 | 1076 | continuef = opts.get(b'continue') |
|
1077 | 1077 | interactive = opts.get(b'interactive') |
|
1078 | 1078 | if not abortf and not continuef: |
|
1079 | 1079 | cmdutil.checkunfinished(repo) |
|
1080 | 1080 | shelved = list(shelved) |
|
1081 | 1081 | if opts.get(b"name"): |
|
1082 | 1082 | shelved.append(opts[b"name"]) |
|
1083 | 1083 | |
|
1084 | 1084 | if interactive and opts.get(b'keep'): |
|
1085 | 1085 | raise error.Abort(_(b'--keep on --interactive is not yet supported')) |
|
1086 | 1086 | if abortf or continuef: |
|
1087 | 1087 | if abortf and continuef: |
|
1088 | 1088 | raise error.Abort(_(b'cannot use both abort and continue')) |
|
1089 | 1089 | if shelved: |
|
1090 | 1090 | raise error.Abort( |
|
1091 | 1091 | _( |
|
1092 | 1092 | b'cannot combine abort/continue with ' |
|
1093 | 1093 | b'naming a shelved change' |
|
1094 | 1094 | ) |
|
1095 | 1095 | ) |
|
1096 | 1096 | if abortf and opts.get(b'tool', False): |
|
1097 | 1097 | ui.warn(_(b'tool option will be ignored\n')) |
|
1098 | 1098 | |
|
1099 | 1099 | state = _loadshelvedstate(ui, repo, opts) |
|
1100 | 1100 | if abortf: |
|
1101 | 1101 | return unshelveabort(ui, repo, state) |
|
1102 | 1102 | elif continuef and interactive: |
|
1103 | 1103 | raise error.Abort(_(b'cannot use both continue and interactive')) |
|
1104 | 1104 | elif continuef: |
|
1105 | 1105 | return unshelvecontinue(ui, repo, state, opts) |
|
1106 | 1106 | elif len(shelved) > 1: |
|
1107 | 1107 | raise error.Abort(_(b'can only unshelve one change at a time')) |
|
1108 | 1108 | elif not shelved: |
|
1109 | 1109 | shelved = listshelves(repo) |
|
1110 | 1110 | if not shelved: |
|
1111 | 1111 | raise error.Abort(_(b'no shelved changes to apply!')) |
|
1112 | 1112 | basename = util.split(shelved[0][1])[1] |
|
1113 | 1113 | ui.status(_(b"unshelving change '%s'\n") % basename) |
|
1114 | 1114 | else: |
|
1115 | 1115 | basename = shelved[0] |
|
1116 | 1116 | |
|
1117 | 1117 | if not shelvedfile(repo, basename, patchextension).exists(): |
|
1118 | 1118 | raise error.Abort(_(b"shelved change '%s' not found") % basename) |
|
1119 | 1119 | |
|
1120 | 1120 | return _dounshelve(ui, repo, basename, opts) |
|
1121 | 1121 | |
|
1122 | 1122 | |
|
1123 | 1123 | def _dounshelve(ui, repo, basename, opts): |
|
1124 | 1124 | repo = repo.unfiltered() |
|
1125 | 1125 | lock = tr = None |
|
1126 | 1126 | try: |
|
1127 | 1127 | lock = repo.lock() |
|
1128 | 1128 | tr = repo.transaction(b'unshelve', report=lambda x: None) |
|
1129 | 1129 | oldtiprev = len(repo) |
|
1130 | 1130 | |
|
1131 | 1131 | pctx = repo[b'.'] |
|
1132 | 1132 | tmpwctx = pctx |
|
1133 | 1133 | # The goal is to have a commit structure like so: |
|
1134 | 1134 | # ...-> pctx -> tmpwctx -> shelvectx |
|
1135 | 1135 | # where tmpwctx is an optional commit with the user's pending changes |
|
1136 | 1136 | # and shelvectx is the unshelved changes. Then we merge it all down |
|
1137 | 1137 | # to the original pctx. |
|
1138 | 1138 | |
|
1139 | 1139 | activebookmark = _backupactivebookmark(repo) |
|
1140 | 1140 | tmpwctx, addedbefore = _commitworkingcopychanges( |
|
1141 | 1141 | ui, repo, opts, tmpwctx |
|
1142 | 1142 | ) |
|
1143 | 1143 | repo, shelvectx = _unshelverestorecommit(ui, repo, tr, basename) |
|
1144 | 1144 | _checkunshelveuntrackedproblems(ui, repo, shelvectx) |
|
1145 | 1145 | branchtorestore = b'' |
|
1146 | 1146 | if shelvectx.branch() != shelvectx.p1().branch(): |
|
1147 | 1147 | branchtorestore = shelvectx.branch() |
|
1148 | 1148 | |
|
1149 | 1149 | shelvectx, ispartialunshelve = _rebaserestoredcommit( |
|
1150 | 1150 | ui, |
|
1151 | 1151 | repo, |
|
1152 | 1152 | opts, |
|
1153 | 1153 | tr, |
|
1154 | 1154 | oldtiprev, |
|
1155 | 1155 | basename, |
|
1156 | 1156 | pctx, |
|
1157 | 1157 | tmpwctx, |
|
1158 | 1158 | shelvectx, |
|
1159 | 1159 | branchtorestore, |
|
1160 | 1160 | activebookmark, |
|
1161 | 1161 | ) |
|
1162 | 1162 | overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')} |
|
1163 | 1163 | with ui.configoverride(overrides, b'unshelve'): |
|
1164 | 1164 | mergefiles(ui, repo, pctx, shelvectx) |
|
1165 | 1165 | restorebranch(ui, repo, branchtorestore) |
|
1166 | 1166 | shelvedstate.clear(repo) |
|
1167 | 1167 | _finishunshelve(repo, oldtiprev, tr, activebookmark) |
|
1168 | 1168 | _forgetunknownfiles(repo, shelvectx, addedbefore) |
|
1169 | 1169 | if not ispartialunshelve: |
|
1170 | 1170 | unshelvecleanup(ui, repo, basename, opts) |
|
1171 | 1171 | finally: |
|
1172 | 1172 | if tr: |
|
1173 | 1173 | tr.release() |
|
1174 | 1174 | lockmod.release(lock) |
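
``_dounshelve`` above follows the usual lock-then-transaction discipline:
acquire the repo lock, open the transaction, and release both in ``finally``
so that a failure anywhere in the body cannot leak either resource. The same
shape in isolation, with a tiny stand-in class instead of the real lock and
transaction objects::

    class _Resource(object):
        """Stand-in for a repo lock or transaction handle."""

        def __init__(self, name):
            self.name = name

        def release(self):
            print('released %s' % self.name)

    lock = tr = None
    try:
        lock = _Resource('lock')
        tr = _Resource('transaction')
        raise RuntimeError('simulated unshelve failure')
    except RuntimeError:
        pass
    finally:
        if tr:  # release in reverse acquisition order
            tr.release()
        if lock:
            lock.release()
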
@@ -1,2055 +1,2055 b'' | |||
|
1 | 1 | # subrepo.py - sub-repository classes and factory |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2009-2010 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import copy |
|
11 | 11 | import errno |
|
12 | 12 | import os |
|
13 | 13 | import re |
|
14 | 14 | import stat |
|
15 | 15 | import subprocess |
|
16 | 16 | import sys |
|
17 | 17 | import tarfile |
|
18 | 18 | import xml.dom.minidom |
|
19 | 19 | |
|
20 | 20 | from .i18n import _ |
|
21 | 21 | from . import ( |
|
22 | 22 | cmdutil, |
|
23 | 23 | encoding, |
|
24 | 24 | error, |
|
25 | 25 | exchange, |
|
26 | 26 | logcmdutil, |
|
27 | 27 | match as matchmod, |
|
28 | 28 | merge as merge, |
|
29 | 29 | node, |
|
30 | 30 | pathutil, |
|
31 | 31 | phases, |
|
32 | 32 | pycompat, |
|
33 | 33 | scmutil, |
|
34 | 34 | subrepoutil, |
|
35 | 35 | util, |
|
36 | 36 | vfs as vfsmod, |
|
37 | 37 | ) |
|
38 | 38 | from .utils import ( |
|
39 | 39 | dateutil, |
|
40 | 40 | hashutil, |
|
41 | 41 | procutil, |
|
42 | 42 | stringutil, |
|
43 | 43 | ) |
|
44 | 44 | |
|
45 | 45 | hg = None |
|
46 | 46 | reporelpath = subrepoutil.reporelpath |
|
47 | 47 | subrelpath = subrepoutil.subrelpath |
|
48 | 48 | _abssource = subrepoutil._abssource |
|
49 | 49 | propertycache = util.propertycache |
|
50 | 50 | |
|
51 | 51 | |
|
52 | 52 | def _expandedabspath(path): |
|
53 | 53 | ''' |
|
54 | 54 | get a path or URL; if it is a path, expand it and return an absolute path
|
55 | 55 | ''' |
|
56 | 56 | expandedpath = util.urllocalpath(util.expandpath(path)) |
|
57 | 57 | u = util.url(expandedpath) |
|
58 | 58 | if not u.scheme: |
|
59 | 59 | path = util.normpath(os.path.abspath(u.path)) |
|
60 | 60 | return path |
|
61 | 61 | |
|
62 | 62 | |
|
63 | 63 | def _getstorehashcachename(remotepath): |
|
64 | 64 | '''get a unique filename for the store hash cache of a remote repository''' |
|
65 | 65 | return node.hex(hashutil.sha1(_expandedabspath(remotepath)).digest())[0:12] |
|
66 | 66 | |
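
Each remote thus gets its own stable cache file, named by the first 12 hex
digits of the SHA-1 of its expanded path. The same derivation with plain
``hashlib`` (a sketch; the real code goes through Mercurial's ``hashutil``
wrapper)::

    import hashlib

    def storehashcachename(remotepath):
        # 12 hex digits are plenty to keep distinct remotes apart
        return hashlib.sha1(remotepath).hexdigest()[:12]

    print(storehashcachename(b'/srv/repos/sub'))  # stable 12-char name
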
|
67 | 67 | |
|
68 | 68 | class SubrepoAbort(error.Abort): |
|
69 | 69 | """Exception class used to avoid handling a subrepo error more than once""" |
|
70 | 70 | |
|
71 | 71 | def __init__(self, *args, **kw): |
|
72 | 72 | self.subrepo = kw.pop('subrepo', None) |
|
73 | 73 | self.cause = kw.pop('cause', None) |
|
74 | 74 | error.Abort.__init__(self, *args, **kw) |
|
75 | 75 | |
|
76 | 76 | |
|
77 | 77 | def annotatesubrepoerror(func): |
|
78 | 78 | def decoratedmethod(self, *args, **kargs): |
|
79 | 79 | try: |
|
80 | 80 | res = func(self, *args, **kargs) |
|
81 | 81 | except SubrepoAbort as ex: |
|
82 | 82 | # This exception has already been handled |
|
83 | 83 | raise ex |
|
84 | 84 | except error.Abort as ex: |
|
85 | 85 | subrepo = subrelpath(self) |
|
86 | 86 | errormsg = ( |
|
87 | 87 | stringutil.forcebytestr(ex) |
|
88 | 88 | + b' ' |
|
89 | 89 | + _(b'(in subrepository "%s")') % subrepo |
|
90 | 90 | ) |
|
91 | 91 | # avoid handling this exception by raising a SubrepoAbort exception |
|
92 | 92 | raise SubrepoAbort( |
|
93 | 93 | errormsg, hint=ex.hint, subrepo=subrepo, cause=sys.exc_info() |
|
94 | 94 | ) |
|
95 | 95 | return res |
|
96 | 96 | |
|
97 | 97 | return decoratedmethod |
|
98 | 98 | |
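
The decorator above guarantees each abort is annotated with its subrepo path
exactly once: ``SubrepoAbort`` is a marker subclass that outer wrappers pass
through untouched. A reduced, self-contained sketch of the same idea, using
plain exceptions in place of ``error.Abort``::

    import functools

    class SubError(Exception):
        pass

    class AnnotatedSubError(SubError):
        """Marker subclass: already annotated, pass through untouched."""

    def annotate(path):
        def deco(func):
            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except AnnotatedSubError:
                    raise  # handled once already
                except SubError as ex:
                    raise AnnotatedSubError(
                        '%s (in subrepository "%s")' % (ex, path))
            return wrapped
        return deco

    @annotate('libs/vendored')
    def outer():
        inner()

    @annotate('libs/vendored')
    def inner():
        raise SubError('missing revision')

    try:
        outer()
    except AnnotatedSubError as ex:
        print(ex)  # annotated by inner() only, not twice
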
|
99 | 99 | |
|
100 | 100 | def _updateprompt(ui, sub, dirty, local, remote): |
|
101 | 101 | if dirty: |
|
102 | 102 | msg = _( |
|
103 | 103 | b' subrepository sources for %s differ\n' |
|
104 | 104 | b'you can use (l)ocal source (%s) or (r)emote source (%s).\n' |
|
105 | 105 | b'what do you want to do?' |
|
106 | 106 | b'$$ &Local $$ &Remote' |
|
107 | 107 | ) % (subrelpath(sub), local, remote) |
|
108 | 108 | else: |
|
109 | 109 | msg = _( |
|
110 | 110 | b' subrepository sources for %s differ (in checked out ' |
|
111 | 111 | b'version)\n' |
|
112 | 112 | b'you can use (l)ocal source (%s) or (r)emote source (%s).\n' |
|
113 | 113 | b'what do you want to do?' |
|
114 | 114 | b'$$ &Local $$ &Remote' |
|
115 | 115 | ) % (subrelpath(sub), local, remote) |
|
116 | 116 | return ui.promptchoice(msg, 0) |
|
117 | 117 | |
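
The prompt strings above use the compact choice syntax of ``promptchoice``:
the text before the first ``$$`` is the question and each later
``$$``-separated item is one choice, with the ``&``-prefixed letter as its
shortcut; the return value is the chosen index. A tiny, hypothetical parser
for that format::

    def parsechoices(msg):
        parts = msg.split(b'$$')
        question = parts[0].strip()
        choices = [p.strip() for p in parts[1:]]
        keys = [c[c.index(b'&') + 1:c.index(b'&') + 2] for c in choices]
        return question, choices, keys

    q, choices, keys = parsechoices(
        b'what do you want to do?$$ &Local $$ &Remote')
    assert keys == [b'L', b'R']
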
|
118 | 118 | |
|
119 | 119 | def _sanitize(ui, vfs, ignore): |
|
120 | 120 | for dirname, dirs, names in vfs.walk(): |
|
121 | 121 | for i, d in enumerate(dirs): |
|
122 | 122 | if d.lower() == ignore: |
|
123 | 123 | del dirs[i] |
|
124 | 124 | break |
|
125 | 125 | if vfs.basename(dirname).lower() != b'.hg': |
|
126 | 126 | continue |
|
127 | 127 | for f in names: |
|
128 | 128 | if f.lower() == b'hgrc': |
|
129 | 129 | ui.warn( |
|
130 | 130 | _( |
|
131 | 131 | b"warning: removing potentially hostile 'hgrc' " |
|
132 | 132 | b"in '%s'\n" |
|
133 | 133 | ) |
|
134 | 134 | % vfs.join(dirname) |
|
135 | 135 | ) |
|
136 | 136 | vfs.unlink(vfs.reljoin(dirname, f)) |
|
137 | 137 | |
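
``_sanitize`` defends against a hostile subrepo checkout shipping its own
``.hg/hgrc``, which could otherwise inject configuration (including hooks)
into the user's session. The equivalent walk over a plain directory tree with
the standard library, as a sketch::

    import os

    def sanitize(root, ignore='.svn'):
        for dirname, dirs, names in os.walk(root):
            # never descend into the foreign metadata directory
            dirs[:] = [d for d in dirs if d.lower() != ignore]
            if os.path.basename(dirname).lower() != '.hg':
                continue
            for f in names:
                if f.lower() == 'hgrc':
                    print('removing potentially hostile hgrc in %s' % dirname)
                    os.unlink(os.path.join(dirname, f))
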
|
138 | 138 | |
|
139 | 139 | def _auditsubrepopath(repo, path): |
|
140 | 140 | # sanity check for potentially unsafe paths such as '~' and '$FOO' |
|
141 | 141 | if path.startswith(b'~') or b'$' in path or util.expandpath(path) != path: |
|
142 | 142 | raise error.Abort( |
|
143 | 143 | _(b'subrepo path contains illegal component: %s') % path |
|
144 | 144 | ) |
|
145 | 145 | # auditor doesn't check if the path itself is a symlink |
|
146 | 146 | pathutil.pathauditor(repo.root)(path) |
|
147 | 147 | if repo.wvfs.islink(path): |
|
148 | 148 | raise error.Abort(_(b"subrepo '%s' traverses symbolic link") % path) |
|
149 | 149 | |
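
Three independent checks gate a subrepo path: no shell-expansion characters
(``~``, ``$``), no path that expands to something other than itself, and no
symlink at the path itself. The first two in isolation, as a sketch with
``os.path`` (the symlink test needs an actual working copy)::

    import os.path

    def audit(path):
        expanded = os.path.expanduser(os.path.expandvars(path))
        if path.startswith('~') or '$' in path or expanded != path:
            raise ValueError(
                'subrepo path contains illegal component: %s' % path)

    audit('vendored/lib')  # fine
    try:
        audit('~/evil')    # rejected before anything gets expanded
    except ValueError as ex:
        print(ex)
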
|
150 | 150 | |
|
151 | 151 | SUBREPO_ALLOWED_DEFAULTS = { |
|
152 | 152 | b'hg': True, |
|
153 | 153 | b'git': False, |
|
154 | 154 | b'svn': False, |
|
155 | 155 | } |
|
156 | 156 | |
|
157 | 157 | |
|
158 | 158 | def _checktype(ui, kind): |
|
159 | 159 | # subrepos.allowed is a master kill switch. If disabled, subrepos are |
|
160 | 160 | # disabled period. |
|
161 | 161 | if not ui.configbool(b'subrepos', b'allowed', True): |
|
162 | 162 | raise error.Abort( |
|
163 | 163 | _(b'subrepos not enabled'), |
|
164 | 164 | hint=_(b"see 'hg help config.subrepos' for details"), |
|
165 | 165 | ) |
|
166 | 166 | |
|
167 | 167 | default = SUBREPO_ALLOWED_DEFAULTS.get(kind, False) |
|
168 | 168 | if not ui.configbool(b'subrepos', b'%s:allowed' % kind, default): |
|
169 | 169 | raise error.Abort( |
|
170 | 170 | _(b'%s subrepos not allowed') % kind, |
|
171 | 171 | hint=_(b"see 'hg help config.subrepos' for details"), |
|
172 | 172 | ) |
|
173 | 173 | |
|
174 | 174 | if kind not in types: |
|
175 | 175 | raise error.Abort(_(b'unknown subrepo type %s') % kind) |
|
176 | 176 | |
|
177 | 177 | |
|
178 | 178 | def subrepo(ctx, path, allowwdir=False, allowcreate=True): |
|
179 | 179 | """return instance of the right subrepo class for subrepo in path""" |
|
180 | 180 | # subrepo inherently violates our import layering rules |
|
181 | 181 | # because it wants to make repo objects from deep inside the stack |
|
182 | 182 | # so we manually delay the circular imports to not break |
|
183 | 183 | # scripts that don't use our demand-loading |
|
184 | 184 | global hg |
|
185 | 185 | from . import hg as h |
|
186 | 186 | |
|
187 | 187 | hg = h |
|
188 | 188 | |
|
189 | 189 | repo = ctx.repo() |
|
190 | 190 | _auditsubrepopath(repo, path) |
|
191 | 191 | state = ctx.substate[path] |
|
192 | 192 | _checktype(repo.ui, state[2]) |
|
193 | 193 | if allowwdir: |
|
194 | 194 | state = (state[0], ctx.subrev(path), state[2]) |
|
195 | 195 | return types[state[2]](ctx, path, state[:2], allowcreate) |
|
196 | 196 | |
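
Dispatch is a plain table lookup: the third field of a ``.hgsubstate`` entry
names the subrepo kind, and ``types`` maps the kind to a class. The same
factory shape reduced to hypothetical stub classes::

    class HgSub(object):
        def __init__(self, path, state):
            self.path, self.state = path, state

    class GitSub(HgSub):
        pass

    types = {b'hg': HgSub, b'git': GitSub}

    def makesub(path, state):
        source, rev, kind = state  # mirrors (source, revision, kind)
        return types[kind](path, (source, rev))

    sub = makesub(b'vendored/lib', (b'../lib', b'abc123', b'hg'))
    assert isinstance(sub, HgSub)
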
|
197 | 197 | |
|
198 | 198 | def nullsubrepo(ctx, path, pctx): |
|
199 | 199 | """return an empty subrepo in pctx for the extant subrepo in ctx""" |
|
200 | 200 | # subrepo inherently violates our import layering rules |
|
201 | 201 | # because it wants to make repo objects from deep inside the stack |
|
202 | 202 | # so we manually delay the circular imports to not break |
|
203 | 203 | # scripts that don't use our demand-loading |
|
204 | 204 | global hg |
|
205 | 205 | from . import hg as h |
|
206 | 206 | |
|
207 | 207 | hg = h |
|
208 | 208 | |
|
209 | 209 | repo = ctx.repo() |
|
210 | 210 | _auditsubrepopath(repo, path) |
|
211 | 211 | state = ctx.substate[path] |
|
212 | 212 | _checktype(repo.ui, state[2]) |
|
213 | 213 | subrev = b'' |
|
214 | 214 | if state[2] == b'hg': |
|
215 | 215 | subrev = b"0" * 40 |
|
216 | 216 | return types[state[2]](pctx, path, (state[0], subrev), True) |
|
217 | 217 | |
|
218 | 218 | |
|
219 | 219 | # subrepo classes need to implement the following abstract class: |
|
220 | 220 | |
|
221 | 221 | |
|
222 | 222 | class abstractsubrepo(object): |
|
223 | 223 | def __init__(self, ctx, path): |
|
224 | 224 | """Initialize abstractsubrepo part |
|
225 | 225 | |
|
226 | 226 | ``ctx`` is the context referring this subrepository in the |
|
227 | 227 | parent repository. |
|
228 | 228 | |
|
229 | 229 | ``path`` is the path to this subrepository as seen from |
|
230 | 230 | innermost repository. |
|
231 | 231 | """ |
|
232 | 232 | self.ui = ctx.repo().ui |
|
233 | 233 | self._ctx = ctx |
|
234 | 234 | self._path = path |
|
235 | 235 | |
|
236 | 236 | def addwebdirpath(self, serverpath, webconf): |
|
237 | 237 | """Add the hgwebdir entries for this subrepo, and any of its subrepos. |
|
238 | 238 | |
|
239 | 239 | ``serverpath`` is the path component of the URL for this repo. |
|
240 | 240 | |
|
241 | 241 | ``webconf`` is the dictionary of hgwebdir entries. |
|
242 | 242 | """ |
|
243 | 243 | pass |
|
244 | 244 | |
|
245 | 245 | def storeclean(self, path): |
|
246 | 246 | """ |
|
247 | 247 | returns true if the repository has not changed since it was last |
|
248 | 248 | cloned from or pushed to a given repository. |
|
249 | 249 | """ |
|
250 | 250 | return False |
|
251 | 251 | |
|
252 | 252 | def dirty(self, ignoreupdate=False, missing=False): |
|
253 | 253 | """returns true if the dirstate of the subrepo is dirty or does not |
|
254 | 254 | match current stored state. If ignoreupdate is true, only check |
|
255 | 255 | whether the subrepo has uncommitted changes in its dirstate. If missing |
|
256 | 256 | is true, check for deleted files. |
|
257 | 257 | """ |
|
258 | 258 | raise NotImplementedError |
|
259 | 259 | |
|
260 | 260 | def dirtyreason(self, ignoreupdate=False, missing=False): |
|
261 | 261 | """return reason string if it is ``dirty()`` |
|
262 | 262 | |
|
263 | 263 | Returned string should have enough information for the message |
|
264 | 264 | of exception. |
|
265 | 265 | |
|
266 | 266 | Otherwise, this returns None.
|
267 | 267 | """ |
|
268 | 268 | if self.dirty(ignoreupdate=ignoreupdate, missing=missing): |
|
269 | 269 | return _(b'uncommitted changes in subrepository "%s"') % subrelpath( |
|
270 | 270 | self |
|
271 | 271 | ) |
|
272 | 272 | |
|
273 | 273 | def bailifchanged(self, ignoreupdate=False, hint=None): |
|
274 | 274 | """raise Abort if subrepository is ``dirty()`` |
|
275 | 275 | """ |
|
276 | 276 | dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate, missing=True) |
|
277 | 277 | if dirtyreason: |
|
278 | 278 | raise error.Abort(dirtyreason, hint=hint) |
|
279 | 279 | |
|
280 | 280 | def basestate(self): |
|
281 | 281 | """current working directory base state, disregarding .hgsubstate |
|
282 | 282 | state and working directory modifications""" |
|
283 | 283 | raise NotImplementedError |
|
284 | 284 | |
|
285 | 285 | def checknested(self, path): |
|
286 | 286 | """check if path is a subrepository within this repository""" |
|
287 | 287 | return False |
|
288 | 288 | |
|
289 | 289 | def commit(self, text, user, date): |
|
290 | 290 | """commit the current changes to the subrepo with the given |
|
291 | 291 | log message. Use given user and date if possible. Return the |
|
292 | 292 | new state of the subrepo. |
|
293 | 293 | """ |
|
294 | 294 | raise NotImplementedError |
|
295 | 295 | |
|
296 | 296 | def phase(self, state): |
|
297 | 297 | """returns phase of specified state in the subrepository. |
|
298 | 298 | """ |
|
299 | 299 | return phases.public |
|
300 | 300 | |
|
301 | 301 | def remove(self): |
|
302 | 302 | """remove the subrepo |
|
303 | 303 | |
|
304 | 304 | (should verify the dirstate is not dirty first) |
|
305 | 305 | """ |
|
306 | 306 | raise NotImplementedError |
|
307 | 307 | |
|
308 | 308 | def get(self, state, overwrite=False): |
|
309 | 309 | """run whatever commands are needed to put the subrepo into |
|
310 | 310 | this state |
|
311 | 311 | """ |
|
312 | 312 | raise NotImplementedError |
|
313 | 313 | |
|
314 | 314 | def merge(self, state): |
|
315 | 315 | """merge currently-saved state with the new state.""" |
|
316 | 316 | raise NotImplementedError |
|
317 | 317 | |
|
318 | 318 | def push(self, opts): |
|
319 | 319 | """perform whatever action is analogous to 'hg push' |
|
320 | 320 | |
|
321 | 321 | This may be a no-op on some systems. |
|
322 | 322 | """ |
|
323 | 323 | raise NotImplementedError |
|
324 | 324 | |
|
325 | 325 | def add(self, ui, match, prefix, uipathfn, explicitonly, **opts): |
|
326 | 326 | return [] |
|
327 | 327 | |
|
328 | 328 | def addremove(self, matcher, prefix, uipathfn, opts): |
|
329 | 329 | self.ui.warn(b"%s: %s" % (prefix, _(b"addremove is not supported"))) |
|
330 | 330 | return 1 |
|
331 | 331 | |
|
332 | 332 | def cat(self, match, fm, fntemplate, prefix, **opts): |
|
333 | 333 | return 1 |
|
334 | 334 | |
|
335 | 335 | def status(self, rev2, **opts): |
|
336 | 336 | return scmutil.status([], [], [], [], [], [], []) |
|
337 | 337 | |
|
338 | 338 | def diff(self, ui, diffopts, node2, match, prefix, **opts): |
|
339 | 339 | pass |
|
340 | 340 | |
|
341 | 341 | def outgoing(self, ui, dest, opts): |
|
342 | 342 | return 1 |
|
343 | 343 | |
|
344 | 344 | def incoming(self, ui, source, opts): |
|
345 | 345 | return 1 |
|
346 | 346 | |
|
347 | 347 | def files(self): |
|
348 | 348 | """return filename iterator""" |
|
349 | 349 | raise NotImplementedError |
|
350 | 350 | |
|
351 | 351 | def filedata(self, name, decode): |
|
352 | 352 | """return file data, optionally passed through repo decoders""" |
|
353 | 353 | raise NotImplementedError |
|
354 | 354 | |
|
355 | 355 | def fileflags(self, name): |
|
356 | 356 | """return file flags""" |
|
357 | 357 | return b'' |
|
358 | 358 | |
|
359 | 359 | def matchfileset(self, cwd, expr, badfn=None): |
|
360 | 360 | """Resolve the fileset expression for this repo""" |
|
361 | 361 | return matchmod.never(badfn=badfn) |
|
362 | 362 | |
|
363 | 363 | def printfiles(self, ui, m, uipathfn, fm, fmt, subrepos): |
|
364 | 364 | """handle the files command for this subrepo""" |
|
365 | 365 | return 1 |
|
366 | 366 | |
|
367 | 367 | def archive(self, archiver, prefix, match=None, decode=True): |
|
368 | 368 | if match is not None: |
|
369 | 369 | files = [f for f in self.files() if match(f)] |
|
370 | 370 | else: |
|
371 | 371 | files = self.files() |
|
372 | 372 | total = len(files) |
|
373 | 373 | relpath = subrelpath(self) |
|
374 | 374 | progress = self.ui.makeprogress( |
|
375 | 375 | _(b'archiving (%s)') % relpath, unit=_(b'files'), total=total |
|
376 | 376 | ) |
|
377 | 377 | progress.update(0) |
|
378 | 378 | for name in files: |
|
379 | 379 | flags = self.fileflags(name) |
|
380 | 380 | mode = b'x' in flags and 0o755 or 0o644 |
|
381 | 381 | symlink = b'l' in flags |
|
382 | 382 | archiver.addfile( |
|
383 | 383 | prefix + name, mode, symlink, self.filedata(name, decode) |
|
384 | 384 | ) |
|
385 | 385 | progress.increment() |
|
386 | 386 | progress.complete() |
|
387 | 387 | return total |
|
388 | 388 | |
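
Per file, only two flag bits survive into an archive: ``x`` picks mode
``0o755`` over ``0o644`` (spelled with the old ``and/or`` idiom above), and
``l`` marks a symlink. A sketch of the same per-file handling on top of the
stdlib ``tarfile`` module::

    import io
    import tarfile

    def addfile(tar, name, flags, data):
        info = tarfile.TarInfo(name)
        info.mode = 0o755 if b'x' in flags else 0o644
        if b'l' in flags:
            info.type = tarfile.SYMTYPE
            info.linkname = data.decode()  # symlink payload is its target
            tar.addfile(info)
        else:
            info.size = len(data)
            tar.addfile(info, io.BytesIO(data))

    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode='w') as tar:
        addfile(tar, 'bin/run', b'x', b'#!/bin/sh\n')
        addfile(tar, 'link', b'l', b'bin/run')
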
|
389 | 389 | def walk(self, match): |
|
390 | 390 | ''' |
|
391 | 391 | walk recursively through the directory tree, finding all files |
|
392 | 392 | matched by the match function |
|
393 | 393 | ''' |
|
394 | 394 | |
|
395 | 395 | def forget(self, match, prefix, uipathfn, dryrun, interactive): |
|
396 | 396 | return ([], []) |
|
397 | 397 | |
|
398 | 398 | def removefiles( |
|
399 | 399 | self, |
|
400 | 400 | matcher, |
|
401 | 401 | prefix, |
|
402 | 402 | uipathfn, |
|
403 | 403 | after, |
|
404 | 404 | force, |
|
405 | 405 | subrepos, |
|
406 | 406 | dryrun, |
|
407 | 407 | warnings, |
|
408 | 408 | ): |
|
409 | 409 | """remove the matched files from the subrepository and the filesystem, |
|
410 | 410 | possibly by force and/or after the file has been removed from the |
|
411 | 411 | filesystem. Return 0 on success, 1 on any warning. |
|
412 | 412 | """ |
|
413 | 413 | warnings.append( |
|
414 | 414 | _(b"warning: removefiles not implemented (%s)") % self._path |
|
415 | 415 | ) |
|
416 | 416 | return 1 |
|
417 | 417 | |
|
418 | 418 | def revert(self, substate, *pats, **opts): |
|
419 | 419 | self.ui.warn( |
|
420 | 420 | _(b'%s: reverting %s subrepos is unsupported\n') |
|
421 | 421 | % (substate[0], substate[2]) |
|
422 | 422 | ) |
|
423 | 423 | return [] |
|
424 | 424 | |
|
425 | 425 | def shortid(self, revid): |
|
426 | 426 | return revid |
|
427 | 427 | |
|
428 | 428 | def unshare(self): |
|
429 | 429 | ''' |
|
430 | 430 | convert this repository from shared to normal storage. |
|
431 | 431 | ''' |
|
432 | 432 | |
|
433 | 433 | def verify(self, onpush=False): |
|
434 | 434 | """verify the revision of this repository that is held in `_state` is |
|
435 | 435 | present and not hidden. Return 0 on success or warning, 1 on any |
|
436 | 436 | error. In the case of ``onpush``, warnings or errors will raise an |
|
437 | 437 | exception if the result of pushing would be a broken remote repository. |
|
438 | 438 | """ |
|
439 | 439 | return 0 |
|
440 | 440 | |
|
441 | 441 | @propertycache |
|
442 | 442 | def wvfs(self): |
|
443 | 443 | """return vfs to access the working directory of this subrepository |
|
444 | 444 | """ |
|
445 | 445 | return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path)) |
|
446 | 446 | |
|
447 | 447 | @propertycache |
|
448 | 448 | def _relpath(self): |
|
449 | 449 | """return path to this subrepository as seen from outermost repository |
|
450 | 450 | """ |
|
451 | 451 | return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path) |
|
452 | 452 | |
|
453 | 453 | |
|
454 | 454 | class hgsubrepo(abstractsubrepo): |
|
455 | 455 | def __init__(self, ctx, path, state, allowcreate): |
|
456 | 456 | super(hgsubrepo, self).__init__(ctx, path) |
|
457 | 457 | self._state = state |
|
458 | 458 | r = ctx.repo() |
|
459 | 459 | root = r.wjoin(util.localpath(path)) |
|
460 | 460 | create = allowcreate and not r.wvfs.exists(b'%s/.hg' % path) |
|
461 | 461 | # repository constructor does expand variables in path, which is |
|
462 | 462 | # unsafe since subrepo path might come from untrusted source. |
|
463 | 463 | if os.path.realpath(util.expandpath(root)) != root: |
|
464 | 464 | raise error.Abort( |
|
465 | 465 | _(b'subrepo path contains illegal component: %s') % path |
|
466 | 466 | ) |
|
467 | 467 | self._repo = hg.repository(r.baseui, root, create=create) |
|
468 | 468 | if self._repo.root != root: |
|
469 | 469 | raise error.ProgrammingError( |
|
470 | 470 | b'failed to reject unsafe subrepo ' |
|
471 | 471 | b'path: %s (expanded to %s)' % (root, self._repo.root) |
|
472 | 472 | ) |
|
473 | 473 | |
|
474 | 474 | # Propagate the parent's --hidden option |
|
475 | 475 | if r is r.unfiltered(): |
|
476 | 476 | self._repo = self._repo.unfiltered() |
|
477 | 477 | |
|
478 | 478 | self.ui = self._repo.ui |
|
479 | 479 | for s, k in [(b'ui', b'commitsubrepos')]: |
|
480 | 480 | v = r.ui.config(s, k) |
|
481 | 481 | if v: |
|
482 | 482 | self.ui.setconfig(s, k, v, b'subrepo') |
|
483 | 483 | # internal config: ui._usedassubrepo |
|
484 | 484 | self.ui.setconfig(b'ui', b'_usedassubrepo', b'True', b'subrepo') |
|
485 | 485 | self._initrepo(r, state[0], create) |
|
486 | 486 | |
|
487 | 487 | @annotatesubrepoerror |
|
488 | 488 | def addwebdirpath(self, serverpath, webconf): |
|
489 | 489 | cmdutil.addwebdirpath(self._repo, subrelpath(self), webconf) |
|
490 | 490 | |
|
491 | 491 | def storeclean(self, path): |
|
492 | 492 | with self._repo.lock(): |
|
493 | 493 | return self._storeclean(path) |
|
494 | 494 | |
|
495 | 495 | def _storeclean(self, path): |
|
496 | 496 | clean = True |
|
497 | 497 | itercache = self._calcstorehash(path) |
|
498 | 498 | for filehash in self._readstorehashcache(path): |
|
499 | 499 | if filehash != next(itercache, None): |
|
500 | 500 | clean = False |
|
501 | 501 | break |
|
502 | 502 | if clean: |
|
503 | 503 | # if the iterator is not exhausted here, the cached and

504 | 504 | # current pull states have a different size
|
505 | 505 | clean = next(itercache, None) is None |
|
506 | 506 | return clean |
|
507 | 507 | |
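
Note how ``_storeclean`` compares the cached lines against a freshly computed
generator without materializing it: each cached line is matched against
``next(itercache)``, and at the end the generator must also be exhausted,
which catches the current state having more lines than the cache. The same
comparison in isolation::

    def samestream(cached, fresh):
        it = iter(fresh)
        for line in cached:
            if line != next(it, None):
                return False
        return next(it, None) is None  # fresh must be exhausted too

    assert samestream([b'a', b'b'], iter([b'a', b'b']))
    assert not samestream([b'a'], iter([b'a', b'b']))  # cache too short
    assert not samestream([b'a', b'b'], iter([b'a']))  # cache too long
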
|
508 | 508 | def _calcstorehash(self, remotepath): |
|
509 | 509 | '''calculate a unique "store hash" |
|
510 | 510 | |
|
511 | 511 | This method is used to detect when there are changes that may
|
512 | 512 | require a push to a given remote path.''' |
|
513 | 513 | # sort the files that will be hashed in increasing (likely) file size |
|
514 | 514 | filelist = (b'bookmarks', b'store/phaseroots', b'store/00changelog.i') |
|
515 | 515 | yield b'# %s\n' % _expandedabspath(remotepath) |
|
516 | 516 | vfs = self._repo.vfs |
|
517 | 517 | for relname in filelist: |
|
518 | 518 | filehash = node.hex(hashutil.sha1(vfs.tryread(relname)).digest()) |
|
519 | 519 | yield b'%s = %s\n' % (relname, filehash) |
|
520 | 520 | |
|
521 | 521 | @propertycache |
|
522 | 522 | def _cachestorehashvfs(self): |
|
523 | 523 | return vfsmod.vfs(self._repo.vfs.join(b'cache/storehash')) |
|
524 | 524 | |
|
525 | 525 | def _readstorehashcache(self, remotepath): |
|
526 | 526 | '''read the store hash cache for a given remote repository''' |
|
527 | 527 | cachefile = _getstorehashcachename(remotepath) |
|
528 | 528 | return self._cachestorehashvfs.tryreadlines(cachefile, b'r') |
|
529 | 529 | |
|
530 | 530 | def _cachestorehash(self, remotepath): |
|
531 | 531 | '''cache the current store hash |
|
532 | 532 | |
|
533 | 533 | Each remote repo requires its own store hash cache, because a subrepo |
|
534 | 534 | store may be "clean" versus a given remote repo, but not versus another |
|
535 | 535 | ''' |
|
536 | 536 | cachefile = _getstorehashcachename(remotepath) |
|
537 | 537 | with self._repo.lock(): |
|
538 | 538 | storehash = list(self._calcstorehash(remotepath)) |
|
539 | 539 | vfs = self._cachestorehashvfs |
|
540 | 540 | vfs.writelines(cachefile, storehash, mode=b'wb', notindexed=True) |
|
541 | 541 | |
|
542 | 542 | def _getctx(self): |
|
543 | 543 | '''fetch the context for this subrepo revision, possibly a workingctx |
|
544 | 544 | ''' |
|
545 | 545 | if self._ctx.rev() is None: |
|
546 | 546 | return self._repo[None] # workingctx if parent is workingctx |
|
547 | 547 | else: |
|
548 | 548 | rev = self._state[1] |
|
549 | 549 | return self._repo[rev] |
|
550 | 550 | |
|
551 | 551 | @annotatesubrepoerror |
|
552 | 552 | def _initrepo(self, parentrepo, source, create): |
|
553 | 553 | self._repo._subparent = parentrepo |
|
554 | 554 | self._repo._subsource = source |
|
555 | 555 | |
|
556 | 556 | if create: |
|
557 | 557 | lines = [b'[paths]\n'] |
|
558 | 558 | |
|
559 | 559 | def addpathconfig(key, value): |
|
560 | 560 | if value: |
|
561 | 561 | lines.append(b'%s = %s\n' % (key, value)) |
|
562 | 562 | self.ui.setconfig(b'paths', key, value, b'subrepo') |
|
563 | 563 | |
|
564 | 564 | defpath = _abssource(self._repo, abort=False) |
|
565 | 565 | defpushpath = _abssource(self._repo, True, abort=False) |
|
566 | 566 | addpathconfig(b'default', defpath) |
|
567 | 567 | if defpath != defpushpath: |
|
568 | 568 | addpathconfig(b'default-push', defpushpath) |
|
569 | 569 | |
|
570 | 570 | self._repo.vfs.write(b'hgrc', util.tonativeeol(b''.join(lines))) |
|
571 | 571 | |
|
572 | 572 | @annotatesubrepoerror |
|
573 | 573 | def add(self, ui, match, prefix, uipathfn, explicitonly, **opts): |
|
574 | 574 | return cmdutil.add( |
|
575 | 575 | ui, self._repo, match, prefix, uipathfn, explicitonly, **opts |
|
576 | 576 | ) |
|
577 | 577 | |
|
578 | 578 | @annotatesubrepoerror |
|
579 | 579 | def addremove(self, m, prefix, uipathfn, opts): |
|
580 | 580 | # In the same way as sub directories are processed, once in a subrepo, |
|
581 | 581 | # always enter any of its subrepos. Don't corrupt the options that will
|
582 | 582 | # be used to process sibling subrepos however. |
|
583 | 583 | opts = copy.copy(opts) |
|
584 | 584 | opts[b'subrepos'] = True |
|
585 | 585 | return scmutil.addremove(self._repo, m, prefix, uipathfn, opts) |
|
586 | 586 | |
|
587 | 587 | @annotatesubrepoerror |
|
588 | 588 | def cat(self, match, fm, fntemplate, prefix, **opts): |
|
589 | 589 | rev = self._state[1] |
|
590 | 590 | ctx = self._repo[rev] |
|
591 | 591 | return cmdutil.cat( |
|
592 | 592 | self.ui, self._repo, ctx, match, fm, fntemplate, prefix, **opts |
|
593 | 593 | ) |
|
594 | 594 | |
|
595 | 595 | @annotatesubrepoerror |
|
596 | 596 | def status(self, rev2, **opts): |
|
597 | 597 | try: |
|
598 | 598 | rev1 = self._state[1] |
|
599 | 599 | ctx1 = self._repo[rev1] |
|
600 | 600 | ctx2 = self._repo[rev2] |
|
601 | 601 | return self._repo.status(ctx1, ctx2, **opts) |
|
602 | 602 | except error.RepoLookupError as inst: |
|
603 | 603 | self.ui.warn( |
|
604 | 604 | _(b'warning: error "%s" in subrepository "%s"\n') |
|
605 | 605 | % (inst, subrelpath(self)) |
|
606 | 606 | ) |
|
607 | 607 | return scmutil.status([], [], [], [], [], [], []) |
|
608 | 608 | |
|
609 | 609 | @annotatesubrepoerror |
|
610 | 610 | def diff(self, ui, diffopts, node2, match, prefix, **opts): |
|
611 | 611 | try: |
|
612 | 612 | node1 = node.bin(self._state[1]) |
|
613 | 613 | # We currently expect node2 to come from substate and be |
|
614 | 614 | # in hex format |
|
615 | 615 | if node2 is not None: |
|
616 | 616 | node2 = node.bin(node2) |
|
617 | 617 | logcmdutil.diffordiffstat( |
|
618 | 618 | ui, |
|
619 | 619 | self._repo, |
|
620 | 620 | diffopts, |
|
621 | 621 | self._repo[node1], |
|
622 | 622 | self._repo[node2], |
|
623 | 623 | match, |
|
624 | 624 | prefix=prefix, |
|
625 | 625 | listsubrepos=True, |
|
626 | 626 | **opts |
|
627 | 627 | ) |
|
628 | 628 | except error.RepoLookupError as inst: |
|
629 | 629 | self.ui.warn( |
|
630 | 630 | _(b'warning: error "%s" in subrepository "%s"\n') |
|
631 | 631 | % (inst, subrelpath(self)) |
|
632 | 632 | ) |
|
633 | 633 | |
|
634 | 634 | @annotatesubrepoerror |
|
635 | 635 | def archive(self, archiver, prefix, match=None, decode=True): |
|
636 | 636 | self._get(self._state + (b'hg',)) |
|
637 | 637 | files = self.files() |
|
638 | 638 | if match: |
|
639 | 639 | files = [f for f in files if match(f)] |
|
640 | 640 | rev = self._state[1] |
|
641 | 641 | ctx = self._repo[rev] |
|
642 | 642 | scmutil.prefetchfiles( |
|
643 | 643 | self._repo, [(ctx.rev(), scmutil.matchfiles(self._repo, files))] |
|
644 | 644 | ) |
|
645 | 645 | total = abstractsubrepo.archive(self, archiver, prefix, match) |
|
646 | 646 | for subpath in ctx.substate: |
|
647 | 647 | s = subrepo(ctx, subpath, True) |
|
648 | 648 | submatch = matchmod.subdirmatcher(subpath, match) |
|
649 | 649 | subprefix = prefix + subpath + b'/' |
|
650 | 650 | total += s.archive(archiver, subprefix, submatch, decode) |
|
651 | 651 | return total |
|
652 | 652 | |
|
653 | 653 | @annotatesubrepoerror |
|
654 | 654 | def dirty(self, ignoreupdate=False, missing=False): |
|
655 | 655 | r = self._state[1] |
|
656 | 656 | if r == b'' and not ignoreupdate: # no state recorded |
|
657 | 657 | return True |
|
658 | 658 | w = self._repo[None] |
|
659 | 659 | if r != w.p1().hex() and not ignoreupdate: |
|
660 | 660 | # different version checked out |
|
661 | 661 | return True |
|
662 | 662 | return w.dirty(missing=missing) # working directory changed |
|
663 | 663 | |
|
664 | 664 | def basestate(self): |
|
665 | 665 | return self._repo[b'.'].hex() |
|
666 | 666 | |
|
667 | 667 | def checknested(self, path): |
|
668 | 668 | return self._repo._checknested(self._repo.wjoin(path)) |
|
669 | 669 | |
|
670 | 670 | @annotatesubrepoerror |
|
671 | 671 | def commit(self, text, user, date): |
|
672 | 672 | # don't bother committing in the subrepo if it's only been |
|
673 | 673 | # updated |
|
674 | 674 | if not self.dirty(True): |
|
675 | 675 | return self._repo[b'.'].hex() |
|
676 | 676 | self.ui.debug(b"committing subrepo %s\n" % subrelpath(self)) |
|
677 | 677 | n = self._repo.commit(text, user, date) |
|
678 | 678 | if not n: |
|
679 | 679 | return self._repo[b'.'].hex() # different version checked out |
|
680 | 680 | return node.hex(n) |
|
681 | 681 | |
|
682 | 682 | @annotatesubrepoerror |
|
683 | 683 | def phase(self, state): |
|
684 | 684 | return self._repo[state or b'.'].phase() |
|
685 | 685 | |
|
686 | 686 | @annotatesubrepoerror |
|
687 | 687 | def remove(self): |
|
688 | 688 | # we can't fully delete the repository as it may contain |
|
689 | 689 | # local-only history |
|
690 | 690 | self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self)) |
|
691 | 691 | hg.clean(self._repo, node.nullid, False) |
|
692 | 692 | |
|
693 | 693 | def _get(self, state): |
|
694 | 694 | source, revision, kind = state |
|
695 | 695 | parentrepo = self._repo._subparent |
|
696 | 696 | |
|
697 | 697 | if revision in self._repo.unfiltered(): |
|
698 | 698 | # Allow shared subrepos tracked at null to setup the sharedpath |
|
699 | 699 | if len(self._repo) != 0 or not parentrepo.shared(): |
|
700 | 700 | return True |
|
701 | 701 | self._repo._subsource = source |
|
702 | 702 | srcurl = _abssource(self._repo) |
|
703 | 703 | |
|
704 | 704 | # Defer creating the peer until after the status message is logged, in |
|
705 | 705 | # case there are network problems. |
|
706 | 706 | getpeer = lambda: hg.peer(self._repo, {}, srcurl) |
|
707 | 707 | |
|
708 | 708 | if len(self._repo) == 0: |
|
709 | 709 | # use self._repo.vfs instead of self.wvfs to remove .hg only |
|
710 | 710 | self._repo.vfs.rmtree() |
|
711 | 711 | |
|
712 | 712 | # A remote subrepo could be shared if there is a local copy |
|
713 | 713 | # relative to the parent's share source. But clone pooling doesn't |
|
714 | 714 | # assemble the repos in a tree, so that can't be consistently done. |
|
715 | 715 | # A simpler option is for the user to configure clone pooling, and |
|
716 | 716 | # work with that. |
|
717 | 717 | if parentrepo.shared() and hg.islocal(srcurl): |
|
718 | 718 | self.ui.status( |
|
719 | 719 | _(b'sharing subrepo %s from %s\n') |
|
720 | 720 | % (subrelpath(self), srcurl) |
|
721 | 721 | ) |
|
722 | 722 | shared = hg.share( |
|
723 | 723 | self._repo._subparent.baseui, |
|
724 | 724 | getpeer(), |
|
725 | 725 | self._repo.root, |
|
726 | 726 | update=False, |
|
727 | 727 | bookmarks=False, |
|
728 | 728 | ) |
|
729 | 729 | self._repo = shared.local() |
|
730 | 730 | else: |
|
731 | 731 | # TODO: find a common place for this and this code in the |
|
732 | 732 | # share.py wrap of the clone command. |
|
733 | 733 | if parentrepo.shared(): |
|
734 | 734 | pool = self.ui.config(b'share', b'pool') |
|
735 | 735 | if pool: |
|
736 | 736 | pool = util.expandpath(pool) |
|
737 | 737 | |
|
738 | 738 | shareopts = { |
|
739 | 739 | b'pool': pool, |
|
740 | 740 | b'mode': self.ui.config(b'share', b'poolnaming'), |
|
741 | 741 | } |
|
742 | 742 | else: |
|
743 | 743 | shareopts = {} |
|
744 | 744 | |
|
745 | 745 | self.ui.status( |
|
746 | 746 | _(b'cloning subrepo %s from %s\n') |
|
747 | 747 | % (subrelpath(self), util.hidepassword(srcurl)) |
|
748 | 748 | ) |
|
749 | 749 | other, cloned = hg.clone( |
|
750 | 750 | self._repo._subparent.baseui, |
|
751 | 751 | {}, |
|
752 | 752 | getpeer(), |
|
753 | 753 | self._repo.root, |
|
754 | 754 | update=False, |
|
755 | 755 | shareopts=shareopts, |
|
756 | 756 | ) |
|
757 | 757 | self._repo = cloned.local() |
|
758 | 758 | self._initrepo(parentrepo, source, create=True) |
|
759 | 759 | self._cachestorehash(srcurl) |
|
760 | 760 | else: |
|
761 | 761 | self.ui.status( |
|
762 | 762 | _(b'pulling subrepo %s from %s\n') |
|
763 | 763 | % (subrelpath(self), util.hidepassword(srcurl)) |
|
764 | 764 | ) |
|
765 | 765 | cleansub = self.storeclean(srcurl) |
|
766 | 766 | exchange.pull(self._repo, getpeer()) |
|
767 | 767 | if cleansub: |
|
768 | 768 | # keep the repo clean after pull |
|
769 | 769 | self._cachestorehash(srcurl) |
|
770 | 770 | return False |
|
771 | 771 | |
|
772 | 772 | @annotatesubrepoerror |
|
773 | 773 | def get(self, state, overwrite=False): |
|
774 | 774 | inrepo = self._get(state) |
|
775 | 775 | source, revision, kind = state |
|
776 | 776 | repo = self._repo |
|
777 | 777 | repo.ui.debug(b"getting subrepo %s\n" % self._path) |
|
778 | 778 | if inrepo: |
|
779 | 779 | urepo = repo.unfiltered() |
|
780 | 780 | ctx = urepo[revision] |
|
781 | 781 | if ctx.hidden(): |
|
782 | 782 | urepo.ui.warn( |
|
783 | 783 | _(b'revision %s in subrepository "%s" is hidden\n') |
|
784 | 784 | % (revision[0:12], self._path) |
|
785 | 785 | ) |
|
786 | 786 | repo = urepo |
|
787 | 787 | if overwrite: |
|
788 | 788 | merge.clean_update(repo[revision]) |
|
789 | 789 | else: |
|
790 |

| 790 | merge.update(repo[revision])
|
791 | 791 | |
|
792 | 792 | @annotatesubrepoerror |
|
793 | 793 | def merge(self, state): |
|
794 | 794 | self._get(state) |
|
795 | 795 | cur = self._repo[b'.'] |
|
796 | 796 | dst = self._repo[state[1]] |
|
797 | 797 | anc = dst.ancestor(cur) |
|
798 | 798 | |
|
799 | 799 | def mergefunc(): |
|
800 | 800 | if anc == cur and dst.branch() == cur.branch(): |
|
801 | 801 | self.ui.debug( |
|
802 | 802 | b'updating subrepository "%s"\n' % subrelpath(self) |
|
803 | 803 | ) |
|
804 | 804 | hg.update(self._repo, state[1]) |
|
805 | 805 | elif anc == dst: |
|
806 | 806 | self.ui.debug( |
|
807 | 807 | b'skipping subrepository "%s"\n' % subrelpath(self) |
|
808 | 808 | ) |
|
809 | 809 | else: |
|
810 | 810 | self.ui.debug( |
|
811 | 811 | b'merging subrepository "%s"\n' % subrelpath(self) |
|
812 | 812 | ) |
|
813 | 813 | hg.merge(dst, remind=False) |
|
814 | 814 | |
|
815 | 815 | wctx = self._repo[None] |
|
816 | 816 | if self.dirty(): |
|
817 | 817 | if anc != dst: |
|
818 | 818 | if _updateprompt(self.ui, self, wctx.dirty(), cur, dst): |
|
819 | 819 | mergefunc() |
|
820 | 820 | else: |
|
821 | 821 | mergefunc() |
|
822 | 822 | else: |
|
823 | 823 | mergefunc() |
|
824 | 824 | |
|
825 | 825 | @annotatesubrepoerror |
|
826 | 826 | def push(self, opts): |
|
827 | 827 | force = opts.get(b'force') |
|
828 | 828 | newbranch = opts.get(b'new_branch') |
|
829 | 829 | ssh = opts.get(b'ssh') |
|
830 | 830 | |
|
831 | 831 | # push subrepos depth-first for coherent ordering |
|
832 | 832 | c = self._repo[b'.'] |
|
833 | 833 | subs = c.substate # only repos that are committed |
|
834 | 834 | for s in sorted(subs): |
|
835 | 835 | if c.sub(s).push(opts) == 0: |
|
836 | 836 | return False |
|
837 | 837 | |
|
838 | 838 | dsturl = _abssource(self._repo, True) |
|
839 | 839 | if not force: |
|
840 | 840 | if self.storeclean(dsturl): |
|
841 | 841 | self.ui.status( |
|
842 | 842 | _(b'no changes made to subrepo %s since last push to %s\n') |
|
843 | 843 | % (subrelpath(self), util.hidepassword(dsturl)) |
|
844 | 844 | ) |
|
845 | 845 | return None |
|
846 | 846 | self.ui.status( |
|
847 | 847 | _(b'pushing subrepo %s to %s\n') |
|
848 | 848 | % (subrelpath(self), util.hidepassword(dsturl)) |
|
849 | 849 | ) |
|
850 | 850 | other = hg.peer(self._repo, {b'ssh': ssh}, dsturl) |
|
851 | 851 | res = exchange.push(self._repo, other, force, newbranch=newbranch) |
|
852 | 852 | |
|
853 | 853 | # the repo is now clean |
|
854 | 854 | self._cachestorehash(dsturl) |
|
855 | 855 | return res.cgresult |
|
856 | 856 | |
|
857 | 857 | @annotatesubrepoerror |
|
858 | 858 | def outgoing(self, ui, dest, opts): |
|
859 | 859 | if b'rev' in opts or b'branch' in opts: |
|
860 | 860 | opts = copy.copy(opts) |
|
861 | 861 | opts.pop(b'rev', None) |
|
862 | 862 | opts.pop(b'branch', None) |
|
863 | 863 | return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts) |
|
864 | 864 | |
|
865 | 865 | @annotatesubrepoerror |
|
866 | 866 | def incoming(self, ui, source, opts): |
|
867 | 867 | if b'rev' in opts or b'branch' in opts: |
|
868 | 868 | opts = copy.copy(opts) |
|
869 | 869 | opts.pop(b'rev', None) |
|
870 | 870 | opts.pop(b'branch', None) |
|
871 | 871 | return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts) |
|
872 | 872 | |
|
873 | 873 | @annotatesubrepoerror |
|
874 | 874 | def files(self): |
|
875 | 875 | rev = self._state[1] |
|
876 | 876 | ctx = self._repo[rev] |
|
877 | 877 | return ctx.manifest().keys() |
|
878 | 878 | |
|
879 | 879 | def filedata(self, name, decode): |
|
880 | 880 | rev = self._state[1] |
|
881 | 881 | data = self._repo[rev][name].data() |
|
882 | 882 | if decode: |
|
883 | 883 | data = self._repo.wwritedata(name, data) |
|
884 | 884 | return data |
|
885 | 885 | |
|
886 | 886 | def fileflags(self, name): |
|
887 | 887 | rev = self._state[1] |
|
888 | 888 | ctx = self._repo[rev] |
|
889 | 889 | return ctx.flags(name) |
|
890 | 890 | |
|
891 | 891 | @annotatesubrepoerror |
|
892 | 892 | def printfiles(self, ui, m, uipathfn, fm, fmt, subrepos): |
|
893 | 893 | # If the parent context is a workingctx, use the workingctx here for |
|
894 | 894 | # consistency. |
|
895 | 895 | if self._ctx.rev() is None: |
|
896 | 896 | ctx = self._repo[None] |
|
897 | 897 | else: |
|
898 | 898 | rev = self._state[1] |
|
899 | 899 | ctx = self._repo[rev] |
|
900 | 900 | return cmdutil.files(ui, ctx, m, uipathfn, fm, fmt, subrepos) |
|
901 | 901 | |
|
902 | 902 | @annotatesubrepoerror |
|
903 | 903 | def matchfileset(self, cwd, expr, badfn=None): |
|
904 | 904 | if self._ctx.rev() is None: |
|
905 | 905 | ctx = self._repo[None] |
|
906 | 906 | else: |
|
907 | 907 | rev = self._state[1] |
|
908 | 908 | ctx = self._repo[rev] |
|
909 | 909 | |
|
910 | 910 | matchers = [ctx.matchfileset(cwd, expr, badfn=badfn)] |
|
911 | 911 | |
|
912 | 912 | for subpath in ctx.substate: |
|
913 | 913 | sub = ctx.sub(subpath) |
|
914 | 914 | |
|
915 | 915 | try: |
|
916 | 916 | sm = sub.matchfileset(cwd, expr, badfn=badfn) |
|
917 | 917 | pm = matchmod.prefixdirmatcher(subpath, sm, badfn=badfn) |
|
918 | 918 | matchers.append(pm) |
|
919 | 919 | except error.LookupError: |
|
920 | 920 | self.ui.status( |
|
921 | 921 | _(b"skipping missing subrepository: %s\n") |
|
922 | 922 | % self.wvfs.reljoin(reporelpath(self), subpath) |
|
923 | 923 | ) |
|
924 | 924 | if len(matchers) == 1: |
|
925 | 925 | return matchers[0] |
|
926 | 926 | return matchmod.unionmatcher(matchers) |
|
927 | 927 | |
|
928 | 928 | def walk(self, match): |
|
929 | 929 | ctx = self._repo[None] |
|
930 | 930 | return ctx.walk(match) |
|
931 | 931 | |
|
932 | 932 | @annotatesubrepoerror |
|
933 | 933 | def forget(self, match, prefix, uipathfn, dryrun, interactive): |
|
934 | 934 | return cmdutil.forget( |
|
935 | 935 | self.ui, |
|
936 | 936 | self._repo, |
|
937 | 937 | match, |
|
938 | 938 | prefix, |
|
939 | 939 | uipathfn, |
|
940 | 940 | True, |
|
941 | 941 | dryrun=dryrun, |
|
942 | 942 | interactive=interactive, |
|
943 | 943 | ) |
|
944 | 944 | |
|
945 | 945 | @annotatesubrepoerror |
|
946 | 946 | def removefiles( |
|
947 | 947 | self, |
|
948 | 948 | matcher, |
|
949 | 949 | prefix, |
|
950 | 950 | uipathfn, |
|
951 | 951 | after, |
|
952 | 952 | force, |
|
953 | 953 | subrepos, |
|
954 | 954 | dryrun, |
|
955 | 955 | warnings, |
|
956 | 956 | ): |
|
957 | 957 | return cmdutil.remove( |
|
958 | 958 | self.ui, |
|
959 | 959 | self._repo, |
|
960 | 960 | matcher, |
|
961 | 961 | prefix, |
|
962 | 962 | uipathfn, |
|
963 | 963 | after, |
|
964 | 964 | force, |
|
965 | 965 | subrepos, |
|
966 | 966 | dryrun, |
|
967 | 967 | ) |
|
968 | 968 | |
|
969 | 969 | @annotatesubrepoerror |
|
970 | 970 | def revert(self, substate, *pats, **opts): |
|
971 | 971 | # reverting a subrepo is a 2 step process: |
|
972 | 972 | # 1. if the no_backup is not set, revert all modified |
|
973 | 973 | # files inside the subrepo |
|
974 | 974 | # 2. update the subrepo to the revision specified in |
|
975 | 975 | # the corresponding substate dictionary |
|
976 | 976 | self.ui.status(_(b'reverting subrepo %s\n') % substate[0]) |
|
977 | 977 | if not opts.get('no_backup'): |
|
978 | 978 | # Revert all files on the subrepo, creating backups |
|
979 | 979 | # Note that this will not recursively revert subrepos |
|
980 | 980 | # We could do it if there was a set:subrepos() predicate |
|
981 | 981 | opts = opts.copy() |
|
982 | 982 | opts['date'] = None |
|
983 | 983 | opts['rev'] = substate[1] |
|
984 | 984 | |
|
985 | 985 | self.filerevert(*pats, **opts) |
|
986 | 986 | |
|
987 | 987 | # Update the repo to the revision specified in the given substate |
|
988 | 988 | if not opts.get('dry_run'): |
|
989 | 989 | self.get(substate, overwrite=True) |
|
990 | 990 | |
|
991 | 991 | def filerevert(self, *pats, **opts): |
|
992 | 992 | ctx = self._repo[opts['rev']] |
|
993 | 993 | if opts.get('all'): |
|
994 | 994 | pats = [b'set:modified()'] |
|
995 | 995 | else: |
|
996 | 996 | pats = [] |
|
997 | 997 | cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts) |
|
998 | 998 | |
|
999 | 999 | def shortid(self, revid): |
|
1000 | 1000 | return revid[:12] |
|
1001 | 1001 | |
|
1002 | 1002 | @annotatesubrepoerror |
|
1003 | 1003 | def unshare(self): |
|
1004 | 1004 | # subrepo inherently violates our import layering rules |
|
1005 | 1005 | # because it wants to make repo objects from deep inside the stack |
|
1006 | 1006 | # so we manually delay the circular imports to not break |
|
1007 | 1007 | # scripts that don't use our demand-loading |
|
1008 | 1008 | global hg |
|
1009 | 1009 | from . import hg as h |
|
1010 | 1010 | |
|
1011 | 1011 | hg = h |
|
1012 | 1012 | |
|
1013 | 1013 | # Nothing prevents a user from sharing in a repo, and then making that a |
|
1014 | 1014 | # subrepo. Alternately, the previous unshare attempt may have failed |
|
1015 | 1015 | # part way through. So recurse whether or not this layer is shared. |
|
1016 | 1016 | if self._repo.shared(): |
|
1017 | 1017 | self.ui.status(_(b"unsharing subrepo '%s'\n") % self._relpath) |
|
1018 | 1018 | |
|
1019 | 1019 | hg.unshare(self.ui, self._repo) |
|
1020 | 1020 | |
|
1021 | 1021 | def verify(self, onpush=False): |
|
1022 | 1022 | try: |
|
1023 | 1023 | rev = self._state[1] |
|
1024 | 1024 | ctx = self._repo.unfiltered()[rev] |
|
1025 | 1025 | if ctx.hidden(): |
|
1026 | 1026 | # Since hidden revisions aren't pushed/pulled, it seems worth an |
|
1027 | 1027 | # explicit warning. |
|
1028 | 1028 | msg = _(b"subrepo '%s' is hidden in revision %s") % ( |
|
1029 | 1029 | self._relpath, |
|
1030 | 1030 | node.short(self._ctx.node()), |
|
1031 | 1031 | ) |
|
1032 | 1032 | |
|
1033 | 1033 | if onpush: |
|
1034 | 1034 | raise error.Abort(msg) |
|
1035 | 1035 | else: |
|
1036 | 1036 | self._repo.ui.warn(b'%s\n' % msg) |
|
1037 | 1037 | return 0 |
|
1038 | 1038 | except error.RepoLookupError: |
|
1039 | 1039 | # A missing subrepo revision may be a case of needing to pull it, so |
|
1040 | 1040 | # don't treat this as an error for `hg verify`. |
|
1041 | 1041 | msg = _(b"subrepo '%s' not found in revision %s") % ( |
|
1042 | 1042 | self._relpath, |
|
1043 | 1043 | node.short(self._ctx.node()), |
|
1044 | 1044 | ) |
|
1045 | 1045 | |
|
1046 | 1046 | if onpush: |
|
1047 | 1047 | raise error.Abort(msg) |
|
1048 | 1048 | else: |
|
1049 | 1049 | self._repo.ui.warn(b'%s\n' % msg) |
|
1050 | 1050 | return 0 |
|
1051 | 1051 | |
|
1052 | 1052 | @propertycache |
|
1053 | 1053 | def wvfs(self): |
|
1054 | 1054 | """return own wvfs for efficiency and consistency |
|
1055 | 1055 | """ |
|
1056 | 1056 | return self._repo.wvfs |
|
1057 | 1057 | |
|
1058 | 1058 | @propertycache |
|
1059 | 1059 | def _relpath(self): |
|
1060 | 1060 | """return path to this subrepository as seen from outermost repository |
|
1061 | 1061 | """ |
|
1062 | 1062 | # Keep consistent dir separators by avoiding vfs.join(self._path) |
|
1063 | 1063 | return reporelpath(self._repo) |
|
1064 | 1064 | |
|
1065 | 1065 | |
|
1066 | 1066 | class svnsubrepo(abstractsubrepo): |
|
1067 | 1067 | def __init__(self, ctx, path, state, allowcreate): |
|
1068 | 1068 | super(svnsubrepo, self).__init__(ctx, path) |
|
1069 | 1069 | self._state = state |
|
1070 | 1070 | self._exe = procutil.findexe(b'svn') |
|
1071 | 1071 | if not self._exe: |
|
1072 | 1072 | raise error.Abort( |
|
1073 | 1073 | _(b"'svn' executable not found for subrepo '%s'") % self._path |
|
1074 | 1074 | ) |
|
1075 | 1075 | |
|
1076 | 1076 | def _svncommand(self, commands, filename=b'', failok=False): |
|
1077 | 1077 | cmd = [self._exe] |
|
1078 | 1078 | extrakw = {} |
|
1079 | 1079 | if not self.ui.interactive(): |
|
1080 | 1080 | # Making stdin be a pipe should prevent svn from behaving |
|
1081 | 1081 | # interactively even if we can't pass --non-interactive. |
|
1082 | 1082 | extrakw['stdin'] = subprocess.PIPE |
|
1083 | 1083 | # Starting in svn 1.5 --non-interactive is a global flag |
|
1084 | 1084 | # instead of being per-command, but we need to support 1.4 so |
|
1085 | 1085 | # we have to be intelligent about what commands take |
|
1086 | 1086 | # --non-interactive. |
|
1087 | 1087 | if commands[0] in (b'update', b'checkout', b'commit'): |
|
1088 | 1088 | cmd.append(b'--non-interactive') |
|
1089 | 1089 | cmd.extend(commands) |
|
1090 | 1090 | if filename is not None: |
|
1091 | 1091 | path = self.wvfs.reljoin( |
|
1092 | 1092 | self._ctx.repo().origroot, self._path, filename |
|
1093 | 1093 | ) |
|
1094 | 1094 | cmd.append(path) |
|
1095 | 1095 | env = dict(encoding.environ) |
|
1096 | 1096 | # Avoid localized output, preserve current locale for everything else. |
|
1097 | 1097 | lc_all = env.get(b'LC_ALL') |
|
1098 | 1098 | if lc_all: |
|
1099 | 1099 | env[b'LANG'] = lc_all |
|
1100 | 1100 | del env[b'LC_ALL'] |
|
1101 | 1101 | env[b'LC_MESSAGES'] = b'C' |
|
1102 | 1102 | p = subprocess.Popen( |
|
1103 | 1103 | pycompat.rapply(procutil.tonativestr, cmd), |
|
1104 | 1104 | bufsize=-1, |
|
1105 | 1105 | close_fds=procutil.closefds, |
|
1106 | 1106 | stdout=subprocess.PIPE, |
|
1107 | 1107 | stderr=subprocess.PIPE, |
|
1108 | 1108 | env=procutil.tonativeenv(env), |
|
1109 | 1109 | **extrakw |
|
1110 | 1110 | ) |
|
1111 | 1111 | stdout, stderr = map(util.fromnativeeol, p.communicate()) |
|
1112 | 1112 | stderr = stderr.strip() |
|
1113 | 1113 | if not failok: |
|
1114 | 1114 | if p.returncode: |
|
1115 | 1115 | raise error.Abort( |
|
1116 | 1116 | stderr or b'exited with code %d' % p.returncode |
|
1117 | 1117 | ) |
|
1118 | 1118 | if stderr: |
|
1119 | 1119 | self.ui.warn(stderr + b'\n') |
|
1120 | 1120 | return stdout, stderr |
|
1121 | 1121 | |
|
1122 | 1122 | @propertycache |
|
1123 | 1123 | def _svnversion(self): |
|
1124 | 1124 | output, err = self._svncommand( |
|
1125 | 1125 | [b'--version', b'--quiet'], filename=None |
|
1126 | 1126 | ) |
|
1127 | 1127 | m = re.search(br'^(\d+)\.(\d+)', output) |
|
1128 | 1128 | if not m: |
|
1129 | 1129 | raise error.Abort(_(b'cannot retrieve svn tool version')) |
|
1130 | 1130 | return (int(m.group(1)), int(m.group(2))) |
|
1131 | 1131 | |
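
Version sniffing is a two-group regex over ``svn --version --quiet`` output,
which prints something like ``1.14.2``. The same parse in isolation::

    import re

    def svnversion(output):
        m = re.search(br'^(\d+)\.(\d+)', output)
        if not m:
            raise ValueError('cannot retrieve svn tool version')
        return (int(m.group(1)), int(m.group(2)))

    assert svnversion(b'1.14.2\n') == (1, 14)
    assert svnversion(b'1.4.6\n') < (1, 5)  # the 1.5 cutoff used elsewhere
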
|
1132 | 1132 | def _svnmissing(self): |
|
1133 | 1133 | return not self.wvfs.exists(b'.svn') |
|
1134 | 1134 | |
|
1135 | 1135 | def _wcrevs(self): |
|
1136 | 1136 | # Get the working directory revision as well as the last |
|
1137 | 1137 | # commit revision so we can compare the subrepo state with |
|
1138 | 1138 | # both. We used to store the working directory one. |
|
1139 | 1139 | output, err = self._svncommand([b'info', b'--xml']) |
|
1140 | 1140 | doc = xml.dom.minidom.parseString(output) |
|
1141 | 1141 | entries = doc.getElementsByTagName('entry') |
|
1142 | 1142 | lastrev, rev = b'0', b'0' |
|
1143 | 1143 | if entries: |
|
1144 | 1144 | rev = pycompat.bytestr(entries[0].getAttribute('revision')) or b'0' |
|
1145 | 1145 | commits = entries[0].getElementsByTagName('commit') |
|
1146 | 1146 | if commits: |
|
1147 | 1147 | lastrev = ( |
|
1148 | 1148 | pycompat.bytestr(commits[0].getAttribute('revision')) |
|
1149 | 1149 | or b'0' |
|
1150 | 1150 | ) |
|
1151 | 1151 | return (lastrev, rev) |
|
1152 | 1152 | |
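
``_wcrevs`` pulls two numbers out of ``svn info --xml``: the working-copy
revision from the ``<entry>`` attribute and the last-committed revision from
the nested ``<commit>`` element. The same ``minidom`` calls against a canned
sample document::

    import xml.dom.minidom

    sample = b'''<?xml version="1.0"?>
    <info><entry revision="42" kind="dir" path=".">
      <commit revision="40"/>
    </entry></info>'''

    doc = xml.dom.minidom.parseString(sample)
    entries = doc.getElementsByTagName('entry')
    lastrev, rev = '0', '0'
    if entries:
        rev = entries[0].getAttribute('revision') or '0'
        commits = entries[0].getElementsByTagName('commit')
        if commits:
            lastrev = commits[0].getAttribute('revision') or '0'
    assert (lastrev, rev) == ('40', '42')
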
|
1153 | 1153 | def _wcrev(self): |
|
1154 | 1154 | return self._wcrevs()[0] |
|
1155 | 1155 | |
|
1156 | 1156 | def _wcchanged(self): |
|
1157 | 1157 | """Return (changes, extchanges, missing) where changes is True |
|
1158 | 1158 | if the working directory was changed, extchanges is |
|
1159 | 1159 | True if any of these changes concern an external entry and missing |
|
1160 | 1160 | is True if any change is a missing entry. |
|
1161 | 1161 | """ |
|
1162 | 1162 | output, err = self._svncommand([b'status', b'--xml']) |
|
1163 | 1163 | externals, changes, missing = [], [], [] |
|
1164 | 1164 | doc = xml.dom.minidom.parseString(output) |
|
1165 | 1165 | for e in doc.getElementsByTagName('entry'): |
|
1166 | 1166 | s = e.getElementsByTagName('wc-status') |
|
1167 | 1167 | if not s: |
|
1168 | 1168 | continue |
|
1169 | 1169 | item = s[0].getAttribute('item') |
|
1170 | 1170 | props = s[0].getAttribute('props') |
|
1171 | 1171 | path = e.getAttribute('path').encode('utf8') |
|
1172 | 1172 | if item == 'external': |
|
1173 | 1173 | externals.append(path) |
|
1174 | 1174 | elif item == 'missing': |
|
1175 | 1175 | missing.append(path) |
|
1176 | 1176 | if item not in ( |
|
1177 | 1177 | '', |
|
1178 | 1178 | 'normal', |
|
1179 | 1179 | 'unversioned', |
|
1180 | 1180 | 'external', |
|
1181 | 1181 | ) or props not in ('', 'none', 'normal'): |
|
1182 | 1182 | changes.append(path) |
|
1183 | 1183 | for path in changes: |
|
1184 | 1184 | for ext in externals: |
|
1185 | 1185 | if path == ext or path.startswith(ext + pycompat.ossep): |
|
1186 | 1186 | return True, True, bool(missing) |
|
1187 | 1187 | return bool(changes), False, bool(missing) |
|
1188 | 1188 | |
|
1189 | 1189 | @annotatesubrepoerror |
|
1190 | 1190 | def dirty(self, ignoreupdate=False, missing=False): |
|
1191 | 1191 | if self._svnmissing(): |
|
1192 | 1192 | return self._state[1] != b'' |
|
1193 | 1193 | wcchanged = self._wcchanged() |
|
1194 | 1194 | changed = wcchanged[0] or (missing and wcchanged[2]) |
|
1195 | 1195 | if not changed: |
|
1196 | 1196 | if self._state[1] in self._wcrevs() or ignoreupdate: |
|
1197 | 1197 | return False |
|
1198 | 1198 | return True |
|
1199 | 1199 | |
|
1200 | 1200 | def basestate(self): |
|
1201 | 1201 | lastrev, rev = self._wcrevs() |
|
1202 | 1202 | if lastrev != rev: |
|
1203 | 1203 | # Last committed rev is not the same as rev. We would
|
1204 | 1204 | # like to take lastrev but we do not know if the subrepo |
|
1205 | 1205 | # URL exists at lastrev. Test it and fall back to rev if it

1206 | 1206 | # is not there.
|
1207 | 1207 | try: |
|
1208 | 1208 | self._svncommand( |
|
1209 | 1209 | [b'list', b'%s@%s' % (self._state[0], lastrev)] |
|
1210 | 1210 | ) |
|
1211 | 1211 | return lastrev |
|
1212 | 1212 | except error.Abort: |
|
1213 | 1213 | pass |
|
1214 | 1214 | return rev |
|
1215 | 1215 | |
|
1216 | 1216 | @annotatesubrepoerror |
|
1217 | 1217 | def commit(self, text, user, date): |
|
1218 | 1218 | # user and date are out of our hands since svn is centralized |
|
1219 | 1219 | changed, extchanged, missing = self._wcchanged() |
|
1220 | 1220 | if not changed: |
|
1221 | 1221 | return self.basestate() |
|
1222 | 1222 | if extchanged: |
|
1223 | 1223 | # Do not try to commit externals |
|
1224 | 1224 | raise error.Abort(_(b'cannot commit svn externals')) |
|
1225 | 1225 | if missing: |
|
1226 | 1226 | # svn can commit with missing entries but aborting like hg |
|
1227 | 1227 | # seems a better approach. |
|
1228 | 1228 | raise error.Abort(_(b'cannot commit missing svn entries')) |
|
1229 | 1229 | commitinfo, err = self._svncommand([b'commit', b'-m', text]) |
|
1230 | 1230 | self.ui.status(commitinfo) |
|
1231 | 1231 | newrev = re.search(b'Committed revision ([0-9]+).', commitinfo) |
|
1232 | 1232 | if not newrev: |
|
1233 | 1233 | if not commitinfo.strip(): |
|
1234 | 1234 | # Sometimes, our definition of "changed" differs from |
|
1235 | 1235 | # svn's. For instance, svn ignores missing files
|
1236 | 1236 | # when committing. If there are only missing files, no |
|
1237 | 1237 | # commit is made, no output and no error code. |
|
1238 | 1238 | raise error.Abort(_(b'failed to commit svn changes')) |
|
1239 | 1239 | raise error.Abort(commitinfo.splitlines()[-1]) |
|
1240 | 1240 | newrev = newrev.groups()[0] |
|
1241 | 1241 | self.ui.status(self._svncommand([b'update', b'-r', newrev])[0]) |
|
1242 | 1242 | return newrev |
|
1243 | 1243 | |
|
1244 | 1244 | @annotatesubrepoerror |
|
1245 | 1245 | def remove(self): |
|
1246 | 1246 | if self.dirty(): |
|
1247 | 1247 | self.ui.warn( |
|
1248 | 1248 | _(b'not removing repo %s because it has changes.\n') |
|
1249 | 1249 | % self._path |
|
1250 | 1250 | ) |
|
1251 | 1251 | return |
|
1252 | 1252 | self.ui.note(_(b'removing subrepo %s\n') % self._path) |
|
1253 | 1253 | |
|
1254 | 1254 | self.wvfs.rmtree(forcibly=True) |
|
1255 | 1255 | try: |
|
1256 | 1256 | pwvfs = self._ctx.repo().wvfs |
|
1257 | 1257 | pwvfs.removedirs(pwvfs.dirname(self._path)) |
|
1258 | 1258 | except OSError: |
|
1259 | 1259 | pass |
|
1260 | 1260 | |
|
1261 | 1261 | @annotatesubrepoerror |
|
1262 | 1262 | def get(self, state, overwrite=False): |
|
1263 | 1263 | if overwrite: |
|
1264 | 1264 | self._svncommand([b'revert', b'--recursive']) |
|
1265 | 1265 | args = [b'checkout'] |
|
1266 | 1266 | if self._svnversion >= (1, 5): |
|
1267 | 1267 | args.append(b'--force') |
|
1268 | 1268 | # The revision must be specified at the end of the URL to properly |
|
1269 | 1269 | # update to a directory which has since been deleted and recreated. |
|
1270 | 1270 | args.append(b'%s@%s' % (state[0], state[1])) |
|
1271 | 1271 | |
|
1272 | 1272 | # SEC: check that the ssh url is safe |
|
1273 | 1273 | util.checksafessh(state[0]) |
|
1274 | 1274 | |
|
1275 | 1275 | status, err = self._svncommand(args, failok=True) |
|
1276 | 1276 | _sanitize(self.ui, self.wvfs, b'.svn') |
|
1277 | 1277 | if not re.search(b'Checked out revision [0-9]+.', status): |
|
1278 | 1278 | if b'is already a working copy for a different URL' in err and ( |
|
1279 | 1279 | self._wcchanged()[:2] == (False, False) |
|
1280 | 1280 | ): |
|
1281 | 1281 | # obstructed but clean working copy, so just blow it away. |
|
1282 | 1282 | self.remove() |
|
1283 | 1283 | self.get(state, overwrite=False) |
|
1284 | 1284 | return |
|
1285 | 1285 | raise error.Abort((status or err).splitlines()[-1]) |
|
1286 | 1286 | self.ui.status(status) |
|
1287 | 1287 | |
|
1288 | 1288 | @annotatesubrepoerror |
|
1289 | 1289 | def merge(self, state): |
|
1290 | 1290 | old = self._state[1] |
|
1291 | 1291 | new = state[1] |
|
1292 | 1292 | wcrev = self._wcrev() |
|
1293 | 1293 | if new != wcrev: |
|
1294 | 1294 | dirty = old == wcrev or self._wcchanged()[0] |
|
1295 | 1295 | if _updateprompt(self.ui, self, dirty, wcrev, new): |
|
1296 | 1296 | self.get(state, False) |
|
1297 | 1297 | |
|
1298 | 1298 | def push(self, opts): |
|
1299 | 1299 | # push is a no-op for SVN |
|
1300 | 1300 | return True |
|
1301 | 1301 | |
|
1302 | 1302 | @annotatesubrepoerror |
|
1303 | 1303 | def files(self): |
|
1304 | 1304 | output = self._svncommand([b'list', b'--recursive', b'--xml'])[0] |
|
1305 | 1305 | doc = xml.dom.minidom.parseString(output) |
|
1306 | 1306 | paths = [] |
|
1307 | 1307 | for e in doc.getElementsByTagName('entry'): |
|
1308 | 1308 | kind = pycompat.bytestr(e.getAttribute('kind')) |
|
1309 | 1309 | if kind != b'file': |
|
1310 | 1310 | continue |
|
1311 | 1311 | name = ''.join( |
|
1312 | 1312 | c.data |
|
1313 | 1313 | for c in e.getElementsByTagName('name')[0].childNodes |
|
1314 | 1314 | if c.nodeType == c.TEXT_NODE |
|
1315 | 1315 | ) |
|
1316 | 1316 | paths.append(name.encode('utf8')) |
|
1317 | 1317 | return paths |
|
1318 | 1318 | |
|
1319 | 1319 | def filedata(self, name, decode): |
|
1320 | 1320 | return self._svncommand([b'cat'], name)[0] |
|
1321 | 1321 | |
|
1322 | 1322 | |
|
1323 | 1323 | class gitsubrepo(abstractsubrepo): |
|
1324 | 1324 | def __init__(self, ctx, path, state, allowcreate): |
|
1325 | 1325 | super(gitsubrepo, self).__init__(ctx, path) |
|
1326 | 1326 | self._state = state |
|
1327 | 1327 | self._abspath = ctx.repo().wjoin(path) |
|
1328 | 1328 | self._subparent = ctx.repo() |
|
1329 | 1329 | self._ensuregit() |
|
1330 | 1330 | |
|
1331 | 1331 | def _ensuregit(self): |
|
1332 | 1332 | try: |
|
1333 | 1333 | self._gitexecutable = b'git' |
|
1334 | 1334 | out, err = self._gitnodir([b'--version']) |
|
1335 | 1335 | except OSError as e: |
|
1336 | 1336 | genericerror = _(b"error executing git for subrepo '%s': %s") |
|
1337 | 1337 | notfoundhint = _(b"check git is installed and in your PATH") |
|
1338 | 1338 | if e.errno != errno.ENOENT: |
|
1339 | 1339 | raise error.Abort( |
|
1340 | 1340 | genericerror % (self._path, encoding.strtolocal(e.strerror)) |
|
1341 | 1341 | ) |
|
1342 | 1342 | elif pycompat.iswindows: |
|
1343 | 1343 | try: |
|
1344 | 1344 | self._gitexecutable = b'git.cmd' |
|
1345 | 1345 | out, err = self._gitnodir([b'--version']) |
|
1346 | 1346 | except OSError as e2: |
|
1347 | 1347 | if e2.errno == errno.ENOENT: |
|
1348 | 1348 | raise error.Abort( |
|
1349 | 1349 | _( |
|
1350 | 1350 | b"couldn't find 'git' or 'git.cmd'" |
|
1351 | 1351 | b" for subrepo '%s'" |
|
1352 | 1352 | ) |
|
1353 | 1353 | % self._path, |
|
1354 | 1354 | hint=notfoundhint, |
|
1355 | 1355 | ) |
|
1356 | 1356 | else: |
|
1357 | 1357 | raise error.Abort( |
|
1358 | 1358 | genericerror |
|
1359 | 1359 | % (self._path, encoding.strtolocal(e2.strerror)) |
|
1360 | 1360 | ) |
|
1361 | 1361 | else: |
|
1362 | 1362 | raise error.Abort( |
|
1363 | 1363 | _(b"couldn't find git for subrepo '%s'") % self._path, |
|
1364 | 1364 | hint=notfoundhint, |
|
1365 | 1365 | ) |
|
1366 | 1366 | versionstatus = self._checkversion(out) |
|
1367 | 1367 | if versionstatus == b'unknown': |
|
1368 | 1368 | self.ui.warn(_(b'cannot retrieve git version\n')) |
|
1369 | 1369 | elif versionstatus == b'abort': |
|
1370 | 1370 | raise error.Abort( |
|
1371 | 1371 | _(b'git subrepo requires git 1.6.0 or later')
|
1372 | 1372 | ) |
|
1373 | 1373 | elif versionstatus == b'warning': |
|
1374 | 1374 | self.ui.warn(_(b'git subrepo requires git 1.6.0 or later\n'))
|
1375 | 1375 | |
|
1376 | 1376 | @staticmethod |
|
1377 | 1377 | def _gitversion(out): |
|
1378 | 1378 | m = re.search(br'^git version (\d+)\.(\d+)\.(\d+)', out) |
|
1379 | 1379 | if m: |
|
1380 | 1380 | return (int(m.group(1)), int(m.group(2)), int(m.group(3))) |
|
1381 | 1381 | |
|
1382 | 1382 | m = re.search(br'^git version (\d+)\.(\d+)', out) |
|
1383 | 1383 | if m: |
|
1384 | 1384 | return (int(m.group(1)), int(m.group(2)), 0) |
|
1385 | 1385 | |
|
1386 | 1386 | return -1 |
|
1387 | 1387 | |
|
1388 | 1388 | @staticmethod |
|
1389 | 1389 | def _checkversion(out): |
|
1390 | 1390 | '''ensure git version is new enough |
|
1391 | 1391 | |
|
1392 | 1392 | >>> _checkversion = gitsubrepo._checkversion |
|
1393 | 1393 | >>> _checkversion(b'git version 1.6.0') |
|
1394 | 1394 | 'ok' |
|
1395 | 1395 | >>> _checkversion(b'git version 1.8.5') |
|
1396 | 1396 | 'ok' |
|
1397 | 1397 | >>> _checkversion(b'git version 1.4.0') |
|
1398 | 1398 | 'abort' |
|
1399 | 1399 | >>> _checkversion(b'git version 1.5.0') |
|
1400 | 1400 | 'warning' |
|
1401 | 1401 | >>> _checkversion(b'git version 1.9-rc0') |
|
1402 | 1402 | 'ok' |
|
1403 | 1403 | >>> _checkversion(b'git version 1.9.0.265.g81cdec2') |
|
1404 | 1404 | 'ok' |
|
1405 | 1405 | >>> _checkversion(b'git version 1.9.0.GIT') |
|
1406 | 1406 | 'ok' |
|
1407 | 1407 | >>> _checkversion(b'git version 12345') |
|
1408 | 1408 | 'unknown' |
|
1409 | 1409 | >>> _checkversion(b'no') |
|
1410 | 1410 | 'unknown' |
|
1411 | 1411 | ''' |
|
1412 | 1412 | version = gitsubrepo._gitversion(out) |
|
1413 | 1413 | # git 1.4.0 can't work at all, but 1.5.X can in at least some cases, |
|
1414 | 1414 | # despite the docstring comment. For now, error on 1.4.0, warn on |
|
1415 | 1415 | # 1.5.0 but attempt to continue. |
|
1416 | 1416 | if version == -1: |
|
1417 | 1417 | return b'unknown' |
|
1418 | 1418 | if version < (1, 5, 0): |
|
1419 | 1419 | return b'abort' |
|
1420 | 1420 | elif version < (1, 6, 0): |
|
1421 | 1421 | return b'warning' |
|
1422 | 1422 | return b'ok' |
|
1423 | 1423 | |
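The version gates above lean on Python's lexicographic tuple comparison, so the parsed triple can be compared directly against thresholds::

    >>> (1, 4, 0) < (1, 5, 0)
    True
    >>> (1, 5, 0) < (1, 6, 0)
    True
    >>> (1, 9, 0) < (1, 6, 0)
    False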
|
1424 | 1424 | def _gitcommand(self, commands, env=None, stream=False): |
|
1425 | 1425 | return self._gitdir(commands, env=env, stream=stream)[0] |
|
1426 | 1426 | |
|
1427 | 1427 | def _gitdir(self, commands, env=None, stream=False): |
|
1428 | 1428 | return self._gitnodir( |
|
1429 | 1429 | commands, env=env, stream=stream, cwd=self._abspath |
|
1430 | 1430 | ) |
|
1431 | 1431 | |
|
1432 | 1432 | def _gitnodir(self, commands, env=None, stream=False, cwd=None): |
|
1433 | 1433 | """Calls the git command |
|
1434 | 1434 | |
|
1435 | 1435 | The method tries to call the git command. Versions prior to 1.6.0

1436 | 1436 | are not supported and will very probably fail.
|
1437 | 1437 | """ |
|
1438 | 1438 | self.ui.debug(b'%s: git %s\n' % (self._relpath, b' '.join(commands))) |
|
1439 | 1439 | if env is None: |
|
1440 | 1440 | env = encoding.environ.copy() |
|
1441 | 1441 | # disable localization for Git output (issue5176) |
|
1442 | 1442 | env[b'LC_ALL'] = b'C' |
|
1443 | 1443 | # fix for Git CVE-2015-7545 |
|
1444 | 1444 | if b'GIT_ALLOW_PROTOCOL' not in env: |
|
1445 | 1445 | env[b'GIT_ALLOW_PROTOCOL'] = b'file:git:http:https:ssh' |
|
1446 | 1446 | # unless ui.quiet is set, print git's stderr, |
|
1447 | 1447 | # which is mostly progress and useful info |
|
1448 | 1448 | errpipe = None |
|
1449 | 1449 | if self.ui.quiet: |
|
1450 | 1450 | errpipe = pycompat.open(os.devnull, b'w') |
|
1451 | 1451 | if self.ui._colormode and len(commands) and commands[0] == b"diff": |
|
1452 | 1452 | # insert the argument in the front, |
|
1453 | 1453 | # the end of git diff arguments is used for paths |
|
1454 | 1454 | commands.insert(1, b'--color') |
|
1455 | 1455 | p = subprocess.Popen( |
|
1456 | 1456 | pycompat.rapply( |
|
1457 | 1457 | procutil.tonativestr, [self._gitexecutable] + commands |
|
1458 | 1458 | ), |
|
1459 | 1459 | bufsize=-1, |
|
1460 | 1460 | cwd=pycompat.rapply(procutil.tonativestr, cwd), |
|
1461 | 1461 | env=procutil.tonativeenv(env), |
|
1462 | 1462 | close_fds=procutil.closefds, |
|
1463 | 1463 | stdout=subprocess.PIPE, |
|
1464 | 1464 | stderr=errpipe, |
|
1465 | 1465 | ) |
|
1466 | 1466 | if stream: |
|
1467 | 1467 | return p.stdout, None |
|
1468 | 1468 | |
|
1469 | 1469 | retdata = p.stdout.read().strip() |
|
1470 | 1470 | # wait for the child to exit to avoid race condition. |
|
1471 | 1471 | p.wait() |
|
1472 | 1472 | |
|
1473 | 1473 | if p.returncode != 0 and p.returncode != 1: |
|
1474 | 1474 | # there are certain error codes that are ok |
|
1475 | 1475 | command = commands[0] |
|
1476 | 1476 | if command in (b'cat-file', b'symbolic-ref'): |
|
1477 | 1477 | return retdata, p.returncode |
|
1478 | 1478 | # for all others, abort |
|
1479 | 1479 | raise error.Abort( |
|
1480 | 1480 | _(b'git %s error %d in %s') |
|
1481 | 1481 | % (command, p.returncode, self._relpath) |
|
1482 | 1482 | ) |
|
1483 | 1483 | |
|
1484 | 1484 | return retdata, p.returncode |
|
1485 | 1485 | |
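Outside Mercurial, the same environment hardening (C locale for parseable output, plus the CVE-2015-7545 protocol whitelist) looks roughly like this sketch::

    import os
    import subprocess

    env = os.environ.copy()
    env['LC_ALL'] = 'C'  # disable localization so output stays parseable
    # restrict the transport helpers git may invoke (CVE-2015-7545)
    env.setdefault('GIT_ALLOW_PROTOCOL', 'file:git:http:https:ssh')
    out = subprocess.check_output(['git', '--version'], env=env)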
|
1486 | 1486 | def _gitmissing(self): |
|
1487 | 1487 | return not self.wvfs.exists(b'.git') |
|
1488 | 1488 | |
|
1489 | 1489 | def _gitstate(self): |
|
1490 | 1490 | return self._gitcommand([b'rev-parse', b'HEAD']) |
|
1491 | 1491 | |
|
1492 | 1492 | def _gitcurrentbranch(self): |
|
1493 | 1493 | current, err = self._gitdir([b'symbolic-ref', b'HEAD', b'--quiet']) |
|
1494 | 1494 | if err: |
|
1495 | 1495 | current = None |
|
1496 | 1496 | return current |
|
1497 | 1497 | |
|
1498 | 1498 | def _gitremote(self, remote): |
|
1499 | 1499 | out = self._gitcommand([b'remote', b'show', b'-n', remote]) |
|
1500 | 1500 | line = out.split(b'\n')[1] |
|
1501 | 1501 | i = line.index(b'URL: ') + len(b'URL: ') |
|
1502 | 1502 | return line[i:] |
|
1503 | 1503 | |
|
1504 | 1504 | def _githavelocally(self, revision): |
|
1505 | 1505 | out, code = self._gitdir([b'cat-file', b'-e', revision]) |
|
1506 | 1506 | return code == 0 |
|
1507 | 1507 | |
|
1508 | 1508 | def _gitisancestor(self, r1, r2): |
|
1509 | 1509 | base = self._gitcommand([b'merge-base', r1, r2]) |
|
1510 | 1510 | return base == r1 |
|
1511 | 1511 | |
|
1512 | 1512 | def _gitisbare(self): |
|
1513 | 1513 | return self._gitcommand([b'config', b'--bool', b'core.bare']) == b'true' |
|
1514 | 1514 | |
|
1515 | 1515 | def _gitupdatestat(self): |
|
1516 | 1516 | """This must be run before git diff-index. |
|
1517 | 1517 | diff-index only looks at changes to file stat; |
|
1518 | 1518 | this command looks at file contents and updates the stat.""" |
|
1519 | 1519 | self._gitcommand([b'update-index', b'-q', b'--refresh']) |
|
1520 | 1520 | |
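The refresh-then-compare pair is usable on its own; a rough standalone equivalent of the ``dirty()`` check below (run inside a git checkout)::

    import subprocess

    subprocess.run(['git', 'update-index', '-q', '--refresh'])
    # diff-index exits 1 when the index/worktree differ from HEAD
    ret = subprocess.run(['git', 'diff-index', '--quiet', 'HEAD'])
    dirty = ret.returncode == 1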
|
1521 | 1521 | def _gitbranchmap(self): |
|
1522 | 1522 | '''returns 2 things: |
|
1523 | 1523 | a map from git branch to revision |
|
1524 | 1524 | a map from revision to branches''' |
|
1525 | 1525 | branch2rev = {} |
|
1526 | 1526 | rev2branch = {} |
|
1527 | 1527 | |
|
1528 | 1528 | out = self._gitcommand( |
|
1529 | 1529 | [b'for-each-ref', b'--format', b'%(objectname) %(refname)'] |
|
1530 | 1530 | ) |
|
1531 | 1531 | for line in out.split(b'\n'): |
|
1532 | 1532 | revision, ref = line.split(b' ') |
|
1533 | 1533 | if not ref.startswith(b'refs/heads/') and not ref.startswith( |
|
1534 | 1534 | b'refs/remotes/' |
|
1535 | 1535 | ): |
|
1536 | 1536 | continue |
|
1537 | 1537 | if ref.startswith(b'refs/remotes/') and ref.endswith(b'/HEAD'): |
|
1538 | 1538 | continue # ignore remote/HEAD redirects |
|
1539 | 1539 | branch2rev[ref] = revision |
|
1540 | 1540 | rev2branch.setdefault(revision, []).append(ref) |
|
1541 | 1541 | return branch2rev, rev2branch |
|
1542 | 1542 | |
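For illustration, the ``for-each-ref`` output being parsed is one ``<objectname> <refname>`` pair per line, e.g. (hashes shortened here)::

    1111111... refs/heads/master
    1111111... refs/remotes/origin/master
    2222222... refs/remotes/origin/topic

so ``rev2branch`` would map the first hash to both master refs, while any ``refs/remotes/*/HEAD`` lines are skipped as redirects.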
|
1543 | 1543 | def _gittracking(self, branches): |
|
1544 | 1544 | """return map of remote branch to local tracking branch""" |
|
1545 | 1545 | # assumes no more than one local tracking branch for each remote |
|
1546 | 1546 | tracking = {} |
|
1547 | 1547 | for b in branches: |
|
1548 | 1548 | if b.startswith(b'refs/remotes/'): |
|
1549 | 1549 | continue |
|
1550 | 1550 | bname = b.split(b'/', 2)[2] |
|
1551 | 1551 | remote = self._gitcommand([b'config', b'branch.%s.remote' % bname]) |
|
1552 | 1552 | if remote: |
|
1553 | 1553 | ref = self._gitcommand([b'config', b'branch.%s.merge' % bname]) |
|
1554 | 1554 | tracking[ |
|
1555 | 1555 | b'refs/remotes/%s/%s' % (remote, ref.split(b'/', 2)[2]) |
|
1556 | 1556 | ] = b |
|
1557 | 1557 | return tracking |
|
1558 | 1558 | |
|
1559 | 1559 | def _abssource(self, source): |
|
1560 | 1560 | if b'://' not in source: |
|
1561 | 1561 | # recognize the scp syntax as an absolute source |
|
1562 | 1562 | colon = source.find(b':') |
|
1563 | 1563 | if colon != -1 and b'/' not in source[:colon]: |
|
1564 | 1564 | return source |
|
1565 | 1565 | self._subsource = source |
|
1566 | 1566 | return _abssource(self) |
|
1567 | 1567 | |
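The scp-style test above (a colon with no slash anywhere before it) behaves like this on a couple of hypothetical sources::

    >>> def is_scp(source):
    ...     colon = source.find(b':')
    ...     return colon != -1 and b'/' not in source[:colon]
    >>> is_scp(b'git@example.com:project.git')
    True
    >>> is_scp(b'some/relative/path:x')
    False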
|
1568 | 1568 | def _fetch(self, source, revision): |
|
1569 | 1569 | if self._gitmissing(): |
|
1570 | 1570 | # SEC: check for safe ssh url |
|
1571 | 1571 | util.checksafessh(source) |
|
1572 | 1572 | |
|
1573 | 1573 | source = self._abssource(source) |
|
1574 | 1574 | self.ui.status( |
|
1575 | 1575 | _(b'cloning subrepo %s from %s\n') % (self._relpath, source) |
|
1576 | 1576 | ) |
|
1577 | 1577 | self._gitnodir([b'clone', source, self._abspath]) |
|
1578 | 1578 | if self._githavelocally(revision): |
|
1579 | 1579 | return |
|
1580 | 1580 | self.ui.status( |
|
1581 | 1581 | _(b'pulling subrepo %s from %s\n') |
|
1582 | 1582 | % (self._relpath, self._gitremote(b'origin')) |
|
1583 | 1583 | ) |
|
1584 | 1584 | # try only origin: the originally cloned repo |
|
1585 | 1585 | self._gitcommand([b'fetch']) |
|
1586 | 1586 | if not self._githavelocally(revision): |
|
1587 | 1587 | raise error.Abort( |
|
1588 | 1588 | _(b'revision %s does not exist in subrepository "%s"\n') |
|
1589 | 1589 | % (revision, self._relpath) |
|
1590 | 1590 | ) |
|
1591 | 1591 | |
|
1592 | 1592 | @annotatesubrepoerror |
|
1593 | 1593 | def dirty(self, ignoreupdate=False, missing=False): |
|
1594 | 1594 | if self._gitmissing(): |
|
1595 | 1595 | return self._state[1] != b'' |
|
1596 | 1596 | if self._gitisbare(): |
|
1597 | 1597 | return True |
|
1598 | 1598 | if not ignoreupdate and self._state[1] != self._gitstate(): |
|
1599 | 1599 | # different version checked out |
|
1600 | 1600 | return True |
|
1601 | 1601 | # check for staged changes or modified files; ignore untracked files |
|
1602 | 1602 | self._gitupdatestat() |
|
1603 | 1603 | out, code = self._gitdir([b'diff-index', b'--quiet', b'HEAD']) |
|
1604 | 1604 | return code == 1 |
|
1605 | 1605 | |
|
1606 | 1606 | def basestate(self): |
|
1607 | 1607 | return self._gitstate() |
|
1608 | 1608 | |
|
1609 | 1609 | @annotatesubrepoerror |
|
1610 | 1610 | def get(self, state, overwrite=False): |
|
1611 | 1611 | source, revision, kind = state |
|
1612 | 1612 | if not revision: |
|
1613 | 1613 | self.remove() |
|
1614 | 1614 | return |
|
1615 | 1615 | self._fetch(source, revision) |
|
1616 | 1616 | # if the repo was set to be bare, unbare it |
|
1617 | 1617 | if self._gitisbare(): |
|
1618 | 1618 | self._gitcommand([b'config', b'core.bare', b'false']) |
|
1619 | 1619 | if self._gitstate() == revision: |
|
1620 | 1620 | self._gitcommand([b'reset', b'--hard', b'HEAD']) |
|
1621 | 1621 | return |
|
1622 | 1622 | elif self._gitstate() == revision: |
|
1623 | 1623 | if overwrite: |
|
1624 | 1624 | # first reset the index to unmark new files for commit, because |
|
1625 | 1625 | # reset --hard will otherwise throw away files added for commit, |
|
1626 | 1626 | # not just unmark them. |
|
1627 | 1627 | self._gitcommand([b'reset', b'HEAD']) |
|
1628 | 1628 | self._gitcommand([b'reset', b'--hard', b'HEAD']) |
|
1629 | 1629 | return |
|
1630 | 1630 | branch2rev, rev2branch = self._gitbranchmap() |
|
1631 | 1631 | |
|
1632 | 1632 | def checkout(args): |
|
1633 | 1633 | cmd = [b'checkout'] |
|
1634 | 1634 | if overwrite: |
|
1635 | 1635 | # first reset the index to unmark new files for commit, because |
|
1636 | 1636 | # the -f option will otherwise throw away files added for |
|
1637 | 1637 | # commit, not just unmark them. |
|
1638 | 1638 | self._gitcommand([b'reset', b'HEAD']) |
|
1639 | 1639 | cmd.append(b'-f') |
|
1640 | 1640 | self._gitcommand(cmd + args) |
|
1641 | 1641 | _sanitize(self.ui, self.wvfs, b'.git') |
|
1642 | 1642 | |
|
1643 | 1643 | def rawcheckout(): |
|
1644 | 1644 | # no branch to checkout, check it out with no branch |
|
1645 | 1645 | self.ui.warn( |
|
1646 | 1646 | _(b'checking out detached HEAD in subrepository "%s"\n') |
|
1647 | 1647 | % self._relpath |
|
1648 | 1648 | ) |
|
1649 | 1649 | self.ui.warn( |
|
1650 | 1650 | _(b'check out a git branch if you intend to make changes\n') |
|
1651 | 1651 | ) |
|
1652 | 1652 | checkout([b'-q', revision]) |
|
1653 | 1653 | |
|
1654 | 1654 | if revision not in rev2branch: |
|
1655 | 1655 | rawcheckout() |
|
1656 | 1656 | return |
|
1657 | 1657 | branches = rev2branch[revision] |
|
1658 | 1658 | firstlocalbranch = None |
|
1659 | 1659 | for b in branches: |
|
1660 | 1660 | if b == b'refs/heads/master': |
|
1661 | 1661 | # master trumps all other branches |
|
1662 | 1662 | checkout([b'refs/heads/master']) |
|
1663 | 1663 | return |
|
1664 | 1664 | if not firstlocalbranch and not b.startswith(b'refs/remotes/'): |
|
1665 | 1665 | firstlocalbranch = b |
|
1666 | 1666 | if firstlocalbranch: |
|
1667 | 1667 | checkout([firstlocalbranch]) |
|
1668 | 1668 | return |
|
1669 | 1669 | |
|
1670 | 1670 | tracking = self._gittracking(branch2rev.keys()) |
|
1671 | 1671 | # choose a remote branch already tracked if possible |
|
1672 | 1672 | remote = branches[0] |
|
1673 | 1673 | if remote not in tracking: |
|
1674 | 1674 | for b in branches: |
|
1675 | 1675 | if b in tracking: |
|
1676 | 1676 | remote = b |
|
1677 | 1677 | break |
|
1678 | 1678 | |
|
1679 | 1679 | if remote not in tracking: |
|
1680 | 1680 | # create a new local tracking branch |
|
1681 | 1681 | local = remote.split(b'/', 3)[3] |
|
1682 | 1682 | checkout([b'-b', local, remote]) |
|
1683 | 1683 | elif self._gitisancestor(branch2rev[tracking[remote]], remote): |
|
1684 | 1684 | # When updating to a tracked remote branch, |
|
1685 | 1685 | # if the local tracking branch is downstream of it, |
|
1686 | 1686 | # a normal `git pull` would have performed a "fast-forward merge" |
|
1687 | 1687 | # which is equivalent to updating the local branch to the remote. |
|
1688 | 1688 | # Since we are only looking at branching at update, we need to |
|
1689 | 1689 | # detect this situation and perform this action lazily. |
|
1690 | 1690 | if tracking[remote] != self._gitcurrentbranch(): |
|
1691 | 1691 | checkout([tracking[remote]]) |
|
1692 | 1692 | self._gitcommand([b'merge', b'--ff', remote]) |
|
1693 | 1693 | _sanitize(self.ui, self.wvfs, b'.git') |
|
1694 | 1694 | else: |
|
1695 | 1695 | # a real merge would be required, just checkout the revision |
|
1696 | 1696 | rawcheckout() |
|
1697 | 1697 | |
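The branch-update decisions above all reduce to ``_gitisancestor``: ``r1`` is an ancestor of ``r2`` exactly when ``git merge-base r1 r2`` prints ``r1``. A standalone sketch of that test (full hashes assumed; modern git also offers ``git merge-base --is-ancestor`` for the same check)::

    import subprocess

    def is_ancestor(r1, r2, cwd):
        # the merge base of r1 and r2 equals r1 iff r1 is an ancestor
        base = subprocess.check_output(
            ['git', 'merge-base', r1, r2], cwd=cwd, text=True
        ).strip()
        return base == r1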
|
1698 | 1698 | @annotatesubrepoerror |
|
1699 | 1699 | def commit(self, text, user, date): |
|
1700 | 1700 | if self._gitmissing(): |
|
1701 | 1701 | raise error.Abort(_(b"subrepo %s is missing") % self._relpath) |
|
1702 | 1702 | cmd = [b'commit', b'-a', b'-m', text] |
|
1703 | 1703 | env = encoding.environ.copy() |
|
1704 | 1704 | if user: |
|
1705 | 1705 | cmd += [b'--author', user] |
|
1706 | 1706 | if date: |
|
1707 | 1707 | # git's date parser silently ignores dates whose epoch seconds

1708 | 1708 | # are below 1e9, so convert to ISO 8601 instead
|
1709 | 1709 | env[b'GIT_AUTHOR_DATE'] = dateutil.datestr( |
|
1710 | 1710 | date, b'%Y-%m-%dT%H:%M:%S %1%2' |
|
1711 | 1711 | ) |
|
1712 | 1712 | self._gitcommand(cmd, env=env) |
|
1713 | 1713 | # make sure commit works otherwise HEAD might not exist under certain |
|
1714 | 1714 | # circumstances |
|
1715 | 1715 | return self._gitstate() |
|
1716 | 1716 | |
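A plain-Python sketch of that date conversion, assuming Mercurial's ``(unixtime, seconds-west-of-UTC)`` date tuples (``to_git_author_date`` is a hypothetical helper, not Mercurial API)::

    import datetime

    def to_git_author_date(unixtime, tzoffset):
        # Mercurial stores the offset in seconds west of UTC, so
        # negate it to build a datetime.timezone
        tz = datetime.timezone(datetime.timedelta(seconds=-tzoffset))
        dt = datetime.datetime.fromtimestamp(unixtime, tz)
        return dt.strftime('%Y-%m-%dT%H:%M:%S %z')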
|
1717 | 1717 | @annotatesubrepoerror |
|
1718 | 1718 | def merge(self, state): |
|
1719 | 1719 | source, revision, kind = state |
|
1720 | 1720 | self._fetch(source, revision) |
|
1721 | 1721 | base = self._gitcommand([b'merge-base', revision, self._state[1]]) |
|
1722 | 1722 | self._gitupdatestat() |
|
1723 | 1723 | out, code = self._gitdir([b'diff-index', b'--quiet', b'HEAD']) |
|
1724 | 1724 | |
|
1725 | 1725 | def mergefunc(): |
|
1726 | 1726 | if base == revision: |
|
1727 | 1727 | self.get(state) # fast forward merge |
|
1728 | 1728 | elif base != self._state[1]: |
|
1729 | 1729 | self._gitcommand([b'merge', b'--no-commit', revision]) |
|
1730 | 1730 | _sanitize(self.ui, self.wvfs, b'.git') |
|
1731 | 1731 | |
|
1732 | 1732 | if self.dirty(): |
|
1733 | 1733 | if self._gitstate() != revision: |
|
1734 | 1734 | dirty = self._gitstate() == self._state[1] or code != 0 |
|
1735 | 1735 | if _updateprompt( |
|
1736 | 1736 | self.ui, self, dirty, self._state[1][:7], revision[:7] |
|
1737 | 1737 | ): |
|
1738 | 1738 | mergefunc() |
|
1739 | 1739 | else: |
|
1740 | 1740 | mergefunc() |
|
1741 | 1741 | |
|
1742 | 1742 | @annotatesubrepoerror |
|
1743 | 1743 | def push(self, opts): |
|
1744 | 1744 | force = opts.get(b'force') |
|
1745 | 1745 | |
|
1746 | 1746 | if not self._state[1]: |
|
1747 | 1747 | return True |
|
1748 | 1748 | if self._gitmissing(): |
|
1749 | 1749 | raise error.Abort(_(b"subrepo %s is missing") % self._relpath) |
|
1750 | 1750 | # if a branch in origin contains the revision, nothing to do |
|
1751 | 1751 | branch2rev, rev2branch = self._gitbranchmap() |
|
1752 | 1752 | if self._state[1] in rev2branch: |
|
1753 | 1753 | for b in rev2branch[self._state[1]]: |
|
1754 | 1754 | if b.startswith(b'refs/remotes/origin/'): |
|
1755 | 1755 | return True |
|
1756 | 1756 | for b, revision in pycompat.iteritems(branch2rev): |
|
1757 | 1757 | if b.startswith(b'refs/remotes/origin/'): |
|
1758 | 1758 | if self._gitisancestor(self._state[1], revision): |
|
1759 | 1759 | return True |
|
1760 | 1760 | # otherwise, try to push the currently checked out branch |
|
1761 | 1761 | cmd = [b'push'] |
|
1762 | 1762 | if force: |
|
1763 | 1763 | cmd.append(b'--force') |
|
1764 | 1764 | |
|
1765 | 1765 | current = self._gitcurrentbranch() |
|
1766 | 1766 | if current: |
|
1767 | 1767 | # determine if the current branch is even useful |
|
1768 | 1768 | if not self._gitisancestor(self._state[1], current): |
|
1769 | 1769 | self.ui.warn( |
|
1770 | 1770 | _( |
|
1771 | 1771 | b'unrelated git branch checked out ' |
|
1772 | 1772 | b'in subrepository "%s"\n' |
|
1773 | 1773 | ) |
|
1774 | 1774 | % self._relpath |
|
1775 | 1775 | ) |
|
1776 | 1776 | return False |
|
1777 | 1777 | self.ui.status( |
|
1778 | 1778 | _(b'pushing branch %s of subrepository "%s"\n') |
|
1779 | 1779 | % (current.split(b'/', 2)[2], self._relpath) |
|
1780 | 1780 | ) |
|
1781 | 1781 | ret = self._gitdir(cmd + [b'origin', current]) |
|
1782 | 1782 | return ret[1] == 0 |
|
1783 | 1783 | else: |
|
1784 | 1784 | self.ui.warn( |
|
1785 | 1785 | _( |
|
1786 | 1786 | b'no branch checked out in subrepository "%s"\n' |
|
1787 | 1787 | b'cannot push revision %s\n' |
|
1788 | 1788 | ) |
|
1789 | 1789 | % (self._relpath, self._state[1]) |
|
1790 | 1790 | ) |
|
1791 | 1791 | return False |
|
1792 | 1792 | |
|
1793 | 1793 | @annotatesubrepoerror |
|
1794 | 1794 | def add(self, ui, match, prefix, uipathfn, explicitonly, **opts): |
|
1795 | 1795 | if self._gitmissing(): |
|
1796 | 1796 | return [] |
|
1797 | 1797 | |
|
1798 | 1798 | s = self.status(None, unknown=True, clean=True) |
|
1799 | 1799 | |
|
1800 | 1800 | tracked = set() |
|
1801 | 1801 | # hg semantics: dirstate states 'a', 'm', 'n' warn (already tracked); 'r' is added again
|
1802 | 1802 | for l in (s.modified, s.added, s.deleted, s.clean): |
|
1803 | 1803 | tracked.update(l) |
|
1804 | 1804 | |
|
1805 | 1805 | # Unknown files not of interest will be rejected by the matcher |
|
1806 | 1806 | files = s.unknown |
|
1807 | 1807 | files.extend(match.files()) |
|
1808 | 1808 | |
|
1809 | 1809 | rejected = [] |
|
1810 | 1810 | |
|
1811 | 1811 | files = [f for f in sorted(set(files)) if match(f)] |
|
1812 | 1812 | for f in files: |
|
1813 | 1813 | exact = match.exact(f) |
|
1814 | 1814 | command = [b"add"] |
|
1815 | 1815 | if exact: |
|
1816 | 1816 | command.append(b"-f") # should be added, even if ignored |
|
1817 | 1817 | if ui.verbose or not exact: |
|
1818 | 1818 | ui.status(_(b'adding %s\n') % uipathfn(f)) |
|
1819 | 1819 | |
|
1820 | 1820 | if f in tracked: # hg prints 'adding' even if already tracked |
|
1821 | 1821 | if exact: |
|
1822 | 1822 | rejected.append(f) |
|
1823 | 1823 | continue |
|
1824 | 1824 | if not opts.get('dry_run'): |
|
1825 | 1825 | self._gitcommand(command + [f]) |
|
1826 | 1826 | |
|
1827 | 1827 | for f in rejected: |
|
1828 | 1828 | ui.warn(_(b"%s already tracked!\n") % uipathfn(f)) |
|
1829 | 1829 | |
|
1830 | 1830 | return rejected |
|
1831 | 1831 | |
|
1832 | 1832 | @annotatesubrepoerror |
|
1833 | 1833 | def remove(self): |
|
1834 | 1834 | if self._gitmissing(): |
|
1835 | 1835 | return |
|
1836 | 1836 | if self.dirty(): |
|
1837 | 1837 | self.ui.warn( |
|
1838 | 1838 | _(b'not removing repo %s because it has changes.\n') |
|
1839 | 1839 | % self._relpath |
|
1840 | 1840 | ) |
|
1841 | 1841 | return |
|
1842 | 1842 | # we can't fully delete the repository as it may contain |
|
1843 | 1843 | # local-only history |
|
1844 | 1844 | self.ui.note(_(b'removing subrepo %s\n') % self._relpath) |
|
1845 | 1845 | self._gitcommand([b'config', b'core.bare', b'true']) |
|
1846 | 1846 | for f, kind in self.wvfs.readdir(): |
|
1847 | 1847 | if f == b'.git': |
|
1848 | 1848 | continue |
|
1849 | 1849 | if kind == stat.S_IFDIR: |
|
1850 | 1850 | self.wvfs.rmtree(f) |
|
1851 | 1851 | else: |
|
1852 | 1852 | self.wvfs.unlink(f) |
|
1853 | 1853 | |
|
1854 | 1854 | def archive(self, archiver, prefix, match=None, decode=True): |
|
1855 | 1855 | total = 0 |
|
1856 | 1856 | source, revision = self._state |
|
1857 | 1857 | if not revision: |
|
1858 | 1858 | return total |
|
1859 | 1859 | self._fetch(source, revision) |
|
1860 | 1860 | |
|
1861 | 1861 | # Stream git's native archive command and parse its tar output.
|
1862 | 1862 | # This should be much faster than manually traversing the trees |
|
1863 | 1863 | # and objects with many subprocess calls. |
|
1864 | 1864 | tarstream = self._gitcommand([b'archive', revision], stream=True) |
|
1865 | 1865 | tar = tarfile.open(fileobj=tarstream, mode='r|') |
|
1866 | 1866 | relpath = subrelpath(self) |
|
1867 | 1867 | progress = self.ui.makeprogress( |
|
1868 | 1868 | _(b'archiving (%s)') % relpath, unit=_(b'files') |
|
1869 | 1869 | ) |
|
1870 | 1870 | progress.update(0) |
|
1871 | 1871 | for info in tar: |
|
1872 | 1872 | if info.isdir(): |
|
1873 | 1873 | continue |
|
1874 | 1874 | bname = pycompat.fsencode(info.name) |
|
1875 | 1875 | if match and not match(bname): |
|
1876 | 1876 | continue |
|
1877 | 1877 | if info.issym(): |
|
1878 | 1878 | data = info.linkname |
|
1879 | 1879 | else: |
|
1880 | 1880 | data = tar.extractfile(info).read() |
|
1881 | 1881 | archiver.addfile(prefix + bname, info.mode, info.issym(), data) |
|
1882 | 1882 | total += 1 |
|
1883 | 1883 | progress.increment() |
|
1884 | 1884 | progress.complete() |
|
1885 | 1885 | return total |
|
1886 | 1886 | |
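The streaming pattern above, piping ``git archive`` straight into ``tarfile`` without touching disk, also works standalone; a minimal sketch (run inside a git checkout, with the revision hardcoded to ``HEAD`` for illustration)::

    import subprocess
    import tarfile

    p = subprocess.Popen(['git', 'archive', 'HEAD'],
                         stdout=subprocess.PIPE)
    tar = tarfile.open(fileobj=p.stdout, mode='r|')  # pipe mode, no seeking
    for info in tar:
        if not info.isdir():
            print(info.name, oct(info.mode))
    p.wait()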
|
1887 | 1887 | @annotatesubrepoerror |
|
1888 | 1888 | def cat(self, match, fm, fntemplate, prefix, **opts): |
|
1889 | 1889 | rev = self._state[1] |
|
1890 | 1890 | if match.anypats(): |
|
1891 | 1891 | return 1 # No support for include/exclude yet |
|
1892 | 1892 | |
|
1893 | 1893 | if not match.files(): |
|
1894 | 1894 | return 1 |
|
1895 | 1895 | |
|
1896 | 1896 | # TODO: add support for non-plain formatter (see cmdutil.cat()) |
|
1897 | 1897 | for f in match.files(): |
|
1898 | 1898 | output = self._gitcommand([b"show", b"%s:%s" % (rev, f)]) |
|
1899 | 1899 | fp = cmdutil.makefileobj( |
|
1900 | 1900 | self._ctx, fntemplate, pathname=self.wvfs.reljoin(prefix, f) |
|
1901 | 1901 | ) |
|
1902 | 1902 | fp.write(output) |
|
1903 | 1903 | fp.close() |
|
1904 | 1904 | return 0 |
|
1905 | 1905 | |
|
1906 | 1906 | @annotatesubrepoerror |
|
1907 | 1907 | def status(self, rev2, **opts): |
|
1908 | 1908 | rev1 = self._state[1] |
|
1909 | 1909 | if self._gitmissing() or not rev1: |
|
1910 | 1910 | # if the repo is missing, return no results |
|
1911 | 1911 | return scmutil.status([], [], [], [], [], [], []) |
|
1912 | 1912 | modified, added, removed = [], [], [] |
|
1913 | 1913 | self._gitupdatestat() |
|
1914 | 1914 | if rev2: |
|
1915 | 1915 | command = [b'diff-tree', b'--no-renames', b'-r', rev1, rev2] |
|
1916 | 1916 | else: |
|
1917 | 1917 | command = [b'diff-index', b'--no-renames', rev1] |
|
1918 | 1918 | out = self._gitcommand(command) |
|
1919 | 1919 | for line in out.split(b'\n'): |
|
1920 | 1920 | tab = line.find(b'\t') |
|
1921 | 1921 | if tab == -1: |
|
1922 | 1922 | continue |
|
1923 | 1923 | status, f = line[tab - 1 : tab], line[tab + 1 :] |
|
1924 | 1924 | if status == b'M': |
|
1925 | 1925 | modified.append(f) |
|
1926 | 1926 | elif status == b'A': |
|
1927 | 1927 | added.append(f) |
|
1928 | 1928 | elif status == b'D': |
|
1929 | 1929 | removed.append(f) |
|
1930 | 1930 | |
|
1931 | 1931 | deleted, unknown, ignored, clean = [], [], [], [] |
|
1932 | 1932 | |
|
1933 | 1933 | command = [b'status', b'--porcelain', b'-z'] |
|
1934 | 1934 | if opts.get('unknown'): |
|
1935 | 1935 | command += [b'--untracked-files=all'] |
|
1936 | 1936 | if opts.get('ignored'): |
|
1937 | 1937 | command += [b'--ignored'] |
|
1938 | 1938 | out = self._gitcommand(command) |
|
1939 | 1939 | |
|
1940 | 1940 | changedfiles = set() |
|
1941 | 1941 | changedfiles.update(modified) |
|
1942 | 1942 | changedfiles.update(added) |
|
1943 | 1943 | changedfiles.update(removed) |
|
1944 | 1944 | entries = iter(out.split(b'\0'))

1945 | 1945 | for line in entries:

1946 | 1946 | if not line:

1947 | 1947 | continue

1948 | 1948 | st = line[0:2]

1949 | 1949 | # moves and copies show 2 files; the source path follows as the next entry

1950 | 1950 | if st[0:1] in (b'R', b'C'):

1951 | 1951 | filename1, filename2 = line[3:], next(entries, None)

1952 | 1952 | else:

1953 | 1953 | filename1, filename2 = line[3:], None
|
1954 | 1954 | |
|
1955 | 1955 | changedfiles.add(filename1) |
|
1956 | 1956 | if filename2: |
|
1957 | 1957 | changedfiles.add(filename2) |
|
1958 | 1958 | |
|
1959 | 1959 | if st == b'??': |
|
1960 | 1960 | unknown.append(filename1) |
|
1961 | 1961 | elif st == b'!!': |
|
1962 | 1962 | ignored.append(filename1) |
|
1963 | 1963 | |
|
1964 | 1964 | if opts.get('clean'): |
|
1965 | 1965 | out = self._gitcommand([b'ls-files']) |
|
1966 | 1966 | for f in out.split(b'\n'): |
|
1967 | 1967 | if f not in changedfiles:
|
1968 | 1968 | clean.append(f) |
|
1969 | 1969 | |
|
1970 | 1970 | return scmutil.status( |
|
1971 | 1971 | modified, added, removed, deleted, unknown, ignored, clean |
|
1972 | 1972 | ) |
|
1973 | 1973 | |
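For reference, ``status --porcelain -z`` emits NUL-separated entries, and a staged rename contributes two consecutive entries (new path, then source path); with hypothetical file names the raw stream looks like::

    R  new-name\0old-name\0?? untracked-file\0

which is why the loop above pulls the source path from the following entry.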
|
1974 | 1974 | @annotatesubrepoerror |
|
1975 | 1975 | def diff(self, ui, diffopts, node2, match, prefix, **opts): |
|
1976 | 1976 | node1 = self._state[1] |
|
1977 | 1977 | cmd = [b'diff', b'--no-renames'] |
|
1978 | 1978 | if opts['stat']: |
|
1979 | 1979 | cmd.append(b'--stat') |
|
1980 | 1980 | else: |
|
1981 | 1981 | # for Git, this also implies '-p' |
|
1982 | 1982 | cmd.append(b'-U%d' % diffopts.context) |
|
1983 | 1983 | |
|
1984 | 1984 | if diffopts.noprefix: |
|
1985 | 1985 | cmd.extend( |
|
1986 | 1986 | [b'--src-prefix=%s/' % prefix, b'--dst-prefix=%s/' % prefix] |
|
1987 | 1987 | ) |
|
1988 | 1988 | else: |
|
1989 | 1989 | cmd.extend( |
|
1990 | 1990 | [b'--src-prefix=a/%s/' % prefix, b'--dst-prefix=b/%s/' % prefix] |
|
1991 | 1991 | ) |
|
1992 | 1992 | |
|
1993 | 1993 | if diffopts.ignorews: |
|
1994 | 1994 | cmd.append(b'--ignore-all-space') |
|
1995 | 1995 | if diffopts.ignorewsamount: |
|
1996 | 1996 | cmd.append(b'--ignore-space-change') |
|
1997 | 1997 | if ( |
|
1998 | 1998 | self._gitversion(self._gitcommand([b'--version'])) >= (1, 8, 4) |
|
1999 | 1999 | and diffopts.ignoreblanklines |
|
2000 | 2000 | ): |
|
2001 | 2001 | cmd.append(b'--ignore-blank-lines') |
|
2002 | 2002 | |
|
2003 | 2003 | cmd.append(node1) |
|
2004 | 2004 | if node2: |
|
2005 | 2005 | cmd.append(node2) |
|
2006 | 2006 | |
|
2007 | 2007 | output = b"" |
|
2008 | 2008 | if match.always(): |
|
2009 | 2009 | output += self._gitcommand(cmd) + b'\n' |
|
2010 | 2010 | else: |
|
2011 | 2011 | st = self.status(node2) |
|
2012 | 2012 | files = [ |
|
2013 | 2013 | f |
|
2014 | 2014 | for sublist in (st.modified, st.added, st.removed) |
|
2015 | 2015 | for f in sublist |
|
2016 | 2016 | ] |
|
2017 | 2017 | for f in files: |
|
2018 | 2018 | if match(f): |
|
2019 | 2019 | output += self._gitcommand(cmd + [b'--', f]) + b'\n' |
|
2020 | 2020 | |
|
2021 | 2021 | if output.strip(): |
|
2022 | 2022 | ui.write(output) |
|
2023 | 2023 | |
|
2024 | 2024 | @annotatesubrepoerror |
|
2025 | 2025 | def revert(self, substate, *pats, **opts): |
|
2026 | 2026 | self.ui.status(_(b'reverting subrepo %s\n') % substate[0]) |
|
2027 | 2027 | if not opts.get('no_backup'): |
|
2028 | 2028 | status = self.status(None) |
|
2029 | 2029 | names = status.modified |
|
2030 | 2030 | for name in names: |
|
2031 | 2031 | # backuppath() expects a path relative to the parent repo (the |
|
2032 | 2032 | # repo that ui.origbackuppath is relative to) |
|
2033 | 2033 | parentname = os.path.join(self._path, name) |
|
2034 | 2034 | bakname = scmutil.backuppath( |
|
2035 | 2035 | self.ui, self._subparent, parentname |
|
2036 | 2036 | ) |
|
2037 | 2037 | self.ui.note( |
|
2038 | 2038 | _(b'saving current version of %s as %s\n') |
|
2039 | 2039 | % (name, os.path.relpath(bakname)) |
|
2040 | 2040 | ) |
|
2041 | 2041 | util.rename(self.wvfs.join(name), bakname) |
|
2042 | 2042 | |
|
2043 | 2043 | if not opts.get('dry_run'): |
|
2044 | 2044 | self.get(substate, overwrite=True) |
|
2045 | 2045 | return [] |
|
2046 | 2046 | |
|
2047 | 2047 | def shortid(self, revid): |
|
2048 | 2048 | return revid[:7] |
|
2049 | 2049 | |
|
2050 | 2050 | |
|
2051 | 2051 | types = { |
|
2052 | 2052 | b'hg': hgsubrepo, |
|
2053 | 2053 | b'svn': svnsubrepo, |
|
2054 | 2054 | b'git': gitsubrepo, |
|
2055 | 2055 | } |