@@ -1,5607 +1,5607 @@ |
|
1 | 1 | # commands.py - command processing for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import difflib |
|
11 | 11 | import errno |
|
12 | 12 | import os |
|
13 | 13 | import re |
|
14 | 14 | import sys |
|
15 | 15 | |
|
16 | 16 | from .i18n import _ |
|
17 | 17 | from .node import ( |
|
18 | 18 | hex, |
|
19 | 19 | nullid, |
|
20 | 20 | nullrev, |
|
21 | 21 | short, |
|
22 | 22 | ) |
|
23 | 23 | from . import ( |
|
24 | 24 | archival, |
|
25 | 25 | bookmarks, |
|
26 | 26 | bundle2, |
|
27 | 27 | changegroup, |
|
28 | 28 | cmdutil, |
|
29 | 29 | copies, |
|
30 | 30 | debugcommands as debugcommandsmod, |
|
31 | 31 | destutil, |
|
32 | 32 | dirstateguard, |
|
33 | 33 | discovery, |
|
34 | 34 | encoding, |
|
35 | 35 | error, |
|
36 | 36 | exchange, |
|
37 | 37 | extensions, |
|
38 | 38 | formatter, |
|
39 | 39 | graphmod, |
|
40 | 40 | hbisect, |
|
41 | 41 | help, |
|
42 | 42 | hg, |
|
43 | 43 | lock as lockmod, |
|
44 | 44 | logcmdutil, |
|
45 | 45 | merge as mergemod, |
|
46 | 46 | obsolete, |
|
47 | 47 | obsutil, |
|
48 | 48 | patch, |
|
49 | 49 | phases, |
|
50 | 50 | pycompat, |
|
51 | 51 | rcutil, |
|
52 | 52 | registrar, |
|
53 | 53 | revsetlang, |
|
54 | 54 | rewriteutil, |
|
55 | 55 | scmutil, |
|
56 | 56 | server, |
|
57 | 57 | streamclone, |
|
58 | 58 | tags as tagsmod, |
|
59 | 59 | templatekw, |
|
60 | 60 | ui as uimod, |
|
61 | 61 | util, |
|
62 | 62 | wireprotoserver, |
|
63 | 63 | ) |
|
64 | 64 | |
|
65 | 65 | release = lockmod.release |
|
66 | 66 | |
|
67 | 67 | table = {} |
|
68 | 68 | table.update(debugcommandsmod.command._table) |
|
69 | 69 | |
|
70 | 70 | command = registrar.command(table) |
|
71 | 71 | readonly = registrar.command.readonly |
|
72 | 72 | |
|
73 | 73 | # common command options |
|
74 | 74 | |
|
75 | 75 | globalopts = [ |
|
76 | 76 | ('R', 'repository', '', |
|
77 | 77 | _('repository root directory or name of overlay bundle file'), |
|
78 | 78 | _('REPO')), |
|
79 | 79 | ('', 'cwd', '', |
|
80 | 80 | _('change working directory'), _('DIR')), |
|
81 | 81 | ('y', 'noninteractive', None, |
|
82 | 82 | _('do not prompt, automatically pick the first choice for all prompts')), |
|
83 | 83 | ('q', 'quiet', None, _('suppress output')), |
|
84 | 84 | ('v', 'verbose', None, _('enable additional output')), |
|
85 | 85 | ('', 'color', '', |
|
86 | 86 | # i18n: 'always', 'auto', 'never', and 'debug' are keywords |
|
87 | 87 | # and should not be translated |
|
88 | 88 | _("when to colorize (boolean, always, auto, never, or debug)"), |
|
89 | 89 | _('TYPE')), |
|
90 | 90 | ('', 'config', [], |
|
91 | 91 | _('set/override config option (use \'section.name=value\')'), |
|
92 | 92 | _('CONFIG')), |
|
93 | 93 | ('', 'debug', None, _('enable debugging output')), |
|
94 | 94 | ('', 'debugger', None, _('start debugger')), |
|
95 | 95 | ('', 'encoding', encoding.encoding, _('set the charset encoding'), |
|
96 | 96 | _('ENCODE')), |
|
97 | 97 | ('', 'encodingmode', encoding.encodingmode, |
|
98 | 98 | _('set the charset encoding mode'), _('MODE')), |
|
99 | 99 | ('', 'traceback', None, _('always print a traceback on exception')), |
|
100 | 100 | ('', 'time', None, _('time how long the command takes')), |
|
101 | 101 | ('', 'profile', None, _('print command execution profile')), |
|
102 | 102 | ('', 'version', None, _('output version information and exit')), |
|
103 | 103 | ('h', 'help', None, _('display help and exit')), |
|
104 | 104 | ('', 'hidden', False, _('consider hidden changesets')), |
|
105 | 105 | ('', 'pager', 'auto', |
|
106 | 106 | _("when to paginate (boolean, always, auto, or never)"), _('TYPE')), |
|
107 | 107 | ] |
|
108 | 108 | |
|
109 | 109 | dryrunopts = cmdutil.dryrunopts |
|
110 | 110 | remoteopts = cmdutil.remoteopts |
|
111 | 111 | walkopts = cmdutil.walkopts |
|
112 | 112 | commitopts = cmdutil.commitopts |
|
113 | 113 | commitopts2 = cmdutil.commitopts2 |
|
114 | 114 | formatteropts = cmdutil.formatteropts |
|
115 | 115 | templateopts = cmdutil.templateopts |
|
116 | 116 | logopts = cmdutil.logopts |
|
117 | 117 | diffopts = cmdutil.diffopts |
|
118 | 118 | diffwsopts = cmdutil.diffwsopts |
|
119 | 119 | diffopts2 = cmdutil.diffopts2 |
|
120 | 120 | mergetoolopts = cmdutil.mergetoolopts |
|
121 | 121 | similarityopts = cmdutil.similarityopts |
|
122 | 122 | subrepoopts = cmdutil.subrepoopts |
|
123 | 123 | debugrevlogopts = cmdutil.debugrevlogopts |
|
124 | 124 | |
|
125 | 125 | # Commands start here, listed alphabetically |
|
126 | 126 | |
|
127 | 127 | @command('^add', |
|
128 | 128 | walkopts + subrepoopts + dryrunopts, |
|
129 | 129 | _('[OPTION]... [FILE]...'), |
|
130 | 130 | inferrepo=True) |
|
131 | 131 | def add(ui, repo, *pats, **opts): |
|
132 | 132 | """add the specified files on the next commit |
|
133 | 133 | |
|
134 | 134 | Schedule files to be version controlled and added to the |
|
135 | 135 | repository. |
|
136 | 136 | |
|
137 | 137 | The files will be added to the repository at the next commit. To |
|
138 | 138 | undo an add before that, see :hg:`forget`. |
|
139 | 139 | |
|
140 | 140 | If no names are given, add all files to the repository (except |
|
141 | 141 | files matching ``.hgignore``). |
|
142 | 142 | |
|
143 | 143 | .. container:: verbose |
|
144 | 144 | |
|
145 | 145 | Examples: |
|
146 | 146 | |
|
147 | 147 | - New (unknown) files are added |
|
148 | 148 | automatically by :hg:`add`:: |
|
149 | 149 | |
|
150 | 150 | $ ls |
|
151 | 151 | foo.c |
|
152 | 152 | $ hg status |
|
153 | 153 | ? foo.c |
|
154 | 154 | $ hg add |
|
155 | 155 | adding foo.c |
|
156 | 156 | $ hg status |
|
157 | 157 | A foo.c |
|
158 | 158 | |
|
159 | 159 | - Specific files to be added can be specified:: |
|
160 | 160 | |
|
161 | 161 | $ ls |
|
162 | 162 | bar.c foo.c |
|
163 | 163 | $ hg status |
|
164 | 164 | ? bar.c |
|
165 | 165 | ? foo.c |
|
166 | 166 | $ hg add bar.c |
|
167 | 167 | $ hg status |
|
168 | 168 | A bar.c |
|
169 | 169 | ? foo.c |
|
170 | 170 | |
|
171 | 171 | Returns 0 if all files are successfully added. |
|
172 | 172 | """ |
|
173 | 173 | |
|
174 | 174 | m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts)) |
|
175 | 175 | rejected = cmdutil.add(ui, repo, m, "", False, **opts) |
|
176 | 176 | return rejected and 1 or 0 |
|
177 | 177 | |
|
178 | 178 | @command('addremove', |
|
179 | 179 | similarityopts + subrepoopts + walkopts + dryrunopts, |
|
180 | 180 | _('[OPTION]... [FILE]...'), |
|
181 | 181 | inferrepo=True) |
|
182 | 182 | def addremove(ui, repo, *pats, **opts): |
|
183 | 183 | """add all new files, delete all missing files |
|
184 | 184 | |
|
185 | 185 | Add all new files and remove all missing files from the |
|
186 | 186 | repository. |
|
187 | 187 | |
|
188 | 188 | Unless names are given, new files are ignored if they match any of |
|
189 | 189 | the patterns in ``.hgignore``. As with add, these changes take |
|
190 | 190 | effect at the next commit. |
|
191 | 191 | |
|
192 | 192 | Use the -s/--similarity option to detect renamed files. This |
|
193 | 193 | option takes a percentage between 0 (disabled) and 100 (files must |
|
194 | 194 | be identical) as its parameter. With a parameter greater than 0, |
|
195 | 195 | this compares every removed file with every added file and records |
|
196 | 196 | those similar enough as renames. Detecting renamed files this way |
|
197 | 197 | can be expensive. After using this option, :hg:`status -C` can be |
|
198 | 198 | used to check which files were identified as moved or renamed. If |
|
199 | 199 | not specified, -s/--similarity defaults to 100 and only renames of |
|
200 | 200 | identical files are detected. |
|
201 | 201 | |
|
202 | 202 | .. container:: verbose |
|
203 | 203 | |
|
204 | 204 | Examples: |
|
205 | 205 | |
|
206 | 206 | - A number of files (bar.c and foo.c) are new, |
|
207 | 207 | while foobar.c has been removed (without using :hg:`remove`) |
|
208 | 208 | from the repository:: |
|
209 | 209 | |
|
210 | 210 | $ ls |
|
211 | 211 | bar.c foo.c |
|
212 | 212 | $ hg status |
|
213 | 213 | ! foobar.c |
|
214 | 214 | ? bar.c |
|
215 | 215 | ? foo.c |
|
216 | 216 | $ hg addremove |
|
217 | 217 | adding bar.c |
|
218 | 218 | adding foo.c |
|
219 | 219 | removing foobar.c |
|
220 | 220 | $ hg status |
|
221 | 221 | A bar.c |
|
222 | 222 | A foo.c |
|
223 | 223 | R foobar.c |
|
224 | 224 | |
|
225 | 225 | - A file foobar.c was moved to foo.c without using :hg:`rename`. |
|
226 | 226 | Afterwards, it was edited slightly:: |
|
227 | 227 | |
|
228 | 228 | $ ls |
|
229 | 229 | foo.c |
|
230 | 230 | $ hg status |
|
231 | 231 | ! foobar.c |
|
232 | 232 | ? foo.c |
|
233 | 233 | $ hg addremove --similarity 90 |
|
234 | 234 | removing foobar.c |
|
235 | 235 | adding foo.c |
|
236 | 236 | recording removal of foobar.c as rename to foo.c (94% similar) |
|
237 | 237 | $ hg status -C |
|
238 | 238 | A foo.c |
|
239 | 239 | foobar.c |
|
240 | 240 | R foobar.c |
|
241 | 241 | |
|
242 | 242 | Returns 0 if all files are successfully added. |
|
243 | 243 | """ |
|
244 | 244 | opts = pycompat.byteskwargs(opts) |
|
245 | 245 | try: |
|
246 | 246 | sim = float(opts.get('similarity') or 100) |
|
247 | 247 | except ValueError: |
|
248 | 248 | raise error.Abort(_('similarity must be a number')) |
|
249 | 249 | if sim < 0 or sim > 100: |
|
250 | 250 | raise error.Abort(_('similarity must be between 0 and 100')) |
|
251 | 251 | matcher = scmutil.match(repo[None], pats, opts) |
|
252 | 252 | return scmutil.addremove(repo, matcher, "", opts, similarity=sim / 100.0) |
|
253 | 253 | |
|
254 | 254 | @command('^annotate|blame', |
|
255 | 255 | [('r', 'rev', '', _('annotate the specified revision'), _('REV')), |
|
256 | 256 | ('', 'follow', None, |
|
257 | 257 | _('follow copies/renames and list the filename (DEPRECATED)')), |
|
258 | 258 | ('', 'no-follow', None, _("don't follow copies and renames")), |
|
259 | 259 | ('a', 'text', None, _('treat all files as text')), |
|
260 | 260 | ('u', 'user', None, _('list the author (long with -v)')), |
|
261 | 261 | ('f', 'file', None, _('list the filename')), |
|
262 | 262 | ('d', 'date', None, _('list the date (short with -q)')), |
|
263 | 263 | ('n', 'number', None, _('list the revision number (default)')), |
|
264 | 264 | ('c', 'changeset', None, _('list the changeset')), |
|
265 | 265 | ('l', 'line-number', None, _('show line number at the first appearance')), |
|
266 | 266 | ('', 'skip', [], _('revision to not display (EXPERIMENTAL)'), _('REV')), |
|
267 | 267 | ] + diffwsopts + walkopts + formatteropts, |
|
268 | 268 | _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'), |
|
269 | 269 | inferrepo=True) |
|
270 | 270 | def annotate(ui, repo, *pats, **opts): |
|
271 | 271 | """show changeset information by line for each file |
|
272 | 272 | |
|
273 | 273 | List changes in files, showing the revision id responsible for |
|
274 | 274 | each line. |
|
275 | 275 | |
|
276 | 276 | This command is useful for discovering when a change was made and |
|
277 | 277 | by whom. |
|
278 | 278 | |
|
279 | 279 | If you include --file, --user, or --date, the revision number is |
|
280 | 280 | suppressed unless you also include --number. |
|
281 | 281 | |
|
282 | 282 | Without the -a/--text option, annotate will avoid processing files |
|
283 | 283 | it detects as binary. With -a, annotate will annotate the file |
|
284 | 284 | anyway, although the results will probably be neither useful |
|
285 | 285 | nor desirable. |
|
286 | 286 | |
|
287 | 287 | Returns 0 on success. |
|
288 | 288 | """ |
|
289 | 289 | opts = pycompat.byteskwargs(opts) |
|
290 | 290 | if not pats: |
|
291 | 291 | raise error.Abort(_('at least one filename or pattern is required')) |
|
292 | 292 | |
|
293 | 293 | if opts.get('follow'): |
|
294 | 294 | # --follow is deprecated and now just an alias for -f/--file |
|
295 | 295 | # to mimic the behavior of Mercurial before version 1.5 |
|
296 | 296 | opts['file'] = True |
|
297 | 297 | |
|
298 | 298 | rev = opts.get('rev') |
|
299 | 299 | if rev: |
|
300 | 300 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
301 | 301 | ctx = scmutil.revsingle(repo, rev) |
|
302 | 302 | |
|
303 | 303 | rootfm = ui.formatter('annotate', opts) |
|
304 | 304 | if ui.quiet: |
|
305 | 305 | datefunc = util.shortdate |
|
306 | 306 | else: |
|
307 | 307 | datefunc = util.datestr |
|
308 | 308 | if ctx.rev() is None: |
|
309 | 309 | def hexfn(node): |
|
310 | 310 | if node is None: |
|
311 | 311 | return None |
|
312 | 312 | else: |
|
313 | 313 | return rootfm.hexfunc(node) |
|
314 | 314 | if opts.get('changeset'): |
|
315 | 315 | # omit "+" suffix which is appended to node hex |
|
316 | 316 | def formatrev(rev): |
|
317 | 317 | if rev is None: |
|
318 | 318 | return '%d' % ctx.p1().rev() |
|
319 | 319 | else: |
|
320 | 320 | return '%d' % rev |
|
321 | 321 | else: |
|
322 | 322 | def formatrev(rev): |
|
323 | 323 | if rev is None: |
|
324 | 324 | return '%d+' % ctx.p1().rev() |
|
325 | 325 | else: |
|
326 | 326 | return '%d ' % rev |
|
327 | 327 | def formathex(hex): |
|
328 | 328 | if hex is None: |
|
329 | 329 | return '%s+' % rootfm.hexfunc(ctx.p1().node()) |
|
330 | 330 | else: |
|
331 | 331 | return '%s ' % hex |
|
332 | 332 | else: |
|
333 | 333 | hexfn = rootfm.hexfunc |
|
334 | 334 | formatrev = formathex = pycompat.bytestr |
|
335 | 335 | |
|
336 | 336 | opmap = [('user', ' ', lambda x: x.fctx.user(), ui.shortuser), |
|
337 | 337 | ('number', ' ', lambda x: x.fctx.rev(), formatrev), |
|
338 | 338 | ('changeset', ' ', lambda x: hexfn(x.fctx.node()), formathex), |
|
339 | 339 | ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)), |
|
340 | 340 | ('file', ' ', lambda x: x.fctx.path(), pycompat.bytestr), |
|
341 | 341 | ('line_number', ':', lambda x: x.lineno, pycompat.bytestr), |
|
342 | 342 | ] |
|
343 | 343 | fieldnamemap = {'number': 'rev', 'changeset': 'node'} |
|
344 | 344 | |
|
345 | 345 | if (not opts.get('user') and not opts.get('changeset') |
|
346 | 346 | and not opts.get('date') and not opts.get('file')): |
|
347 | 347 | opts['number'] = True |
|
348 | 348 | |
|
349 | 349 | linenumber = opts.get('line_number') is not None |
|
350 | 350 | if linenumber and (not opts.get('changeset')) and (not opts.get('number')): |
|
351 | 351 | raise error.Abort(_('at least one of -n/-c is required for -l')) |
|
352 | 352 | |
|
353 | 353 | ui.pager('annotate') |
|
354 | 354 | |
|
355 | 355 | if rootfm.isplain(): |
|
356 | 356 | def makefunc(get, fmt): |
|
357 | 357 | return lambda x: fmt(get(x)) |
|
358 | 358 | else: |
|
359 | 359 | def makefunc(get, fmt): |
|
360 | 360 | return get |
|
361 | 361 | funcmap = [(makefunc(get, fmt), sep) for op, sep, get, fmt in opmap |
|
362 | 362 | if opts.get(op)] |
|
363 | 363 | funcmap[0] = (funcmap[0][0], '') # no separator in front of first column |
|
364 | 364 | fields = ' '.join(fieldnamemap.get(op, op) for op, sep, get, fmt in opmap |
|
365 | 365 | if opts.get(op)) |
|
366 | 366 | |
|
367 | 367 | def bad(x, y): |
|
368 | 368 | raise error.Abort("%s: %s" % (x, y)) |
|
369 | 369 | |
|
370 | 370 | m = scmutil.match(ctx, pats, opts, badfn=bad) |
|
371 | 371 | |
|
372 | 372 | follow = not opts.get('no_follow') |
|
373 | 373 | diffopts = patch.difffeatureopts(ui, opts, section='annotate', |
|
374 | 374 | whitespace=True) |
|
375 | 375 | skiprevs = opts.get('skip') |
|
376 | 376 | if skiprevs: |
|
377 | 377 | skiprevs = scmutil.revrange(repo, skiprevs) |
|
378 | 378 | |
|
379 | 379 | for abs in ctx.walk(m): |
|
380 | 380 | fctx = ctx[abs] |
|
381 | 381 | rootfm.startitem() |
|
382 | 382 | rootfm.data(abspath=abs, path=m.rel(abs)) |
|
383 | 383 | if not opts.get('text') and fctx.isbinary(): |
|
384 | 384 | rootfm.plain(_("%s: binary file\n") |
|
385 | 385 | % ((pats and m.rel(abs)) or abs)) |
|
386 | 386 | continue |
|
387 | 387 | |
|
388 | 388 | fm = rootfm.nested('lines') |
|
389 | 389 | lines = fctx.annotate(follow=follow, linenumber=linenumber, |
|
390 | 390 | skiprevs=skiprevs, diffopts=diffopts) |
|
391 | 391 | if not lines: |
|
392 | 392 | fm.end() |
|
393 | 393 | continue |
|
394 | 394 | formats = [] |
|
395 | 395 | pieces = [] |
|
396 | 396 | |
|
397 | 397 | for f, sep in funcmap: |
|
398 | 398 | l = [f(n) for n, dummy in lines] |
|
399 | 399 | if fm.isplain(): |
|
400 | 400 | sizes = [encoding.colwidth(x) for x in l] |
|
401 | 401 | ml = max(sizes) |
|
402 | 402 | formats.append([sep + ' ' * (ml - w) + '%s' for w in sizes]) |
|
403 | 403 | else: |
|
404 | 404 | formats.append(['%s' for x in l]) |
|
405 | 405 | pieces.append(l) |
|
406 | 406 | |
|
407 | 407 | for f, p, l in zip(zip(*formats), zip(*pieces), lines): |
|
408 | 408 | fm.startitem() |
|
409 | 409 | fm.write(fields, "".join(f), *p) |
|
410 | 410 | if l[0].skip: |
|
411 | 411 | fmt = "* %s" |
|
412 | 412 | else: |
|
413 | 413 | fmt = ": %s" |
|
414 | 414 | fm.write('line', fmt, l[1]) |
|
415 | 415 | |
|
416 | 416 | if not lines[-1][1].endswith('\n'): |
|
417 | 417 | fm.plain('\n') |
|
418 | 418 | fm.end() |
|
419 | 419 | |
|
420 | 420 | rootfm.end() |
|
421 | 421 | |
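
The loop above builds one getter/formatter pair per requested column (user, rev, node, date, file, line number), measures the widest cell in each column, and right-pads every cell to that width before writing. A minimal standalone sketch of the same pattern in plain Python, with illustrative data (none of these names are Mercurial APIs)::

    # each column: (name, separator, getter); cells are right-aligned to
    # the widest value in their column, as the funcmap/sizes loop does above
    opmap = [('user', ' ', lambda l: l['user']),
             ('number', ' ', lambda l: str(l['rev']))]
    opmap[0] = (opmap[0][0], '', opmap[0][2])  # no separator before first column
    lines = [{'user': 'mpm', 'rev': 0, 'text': 'hello'},
             {'user': 'alice', 'rev': 12, 'text': 'world'}]
    for name, sep, get in opmap:
        vals = [get(l) for l in lines]
        width = max(len(v) for v in vals)      # column width, like ml above
        for l, v in zip(lines, vals):
            l.setdefault('cols', []).append(sep + v.rjust(width))
    for l in lines:
        print(''.join(l['cols']) + ': ' + l['text'])

Running it prints two right-aligned columns followed by the line text, which is exactly how :hg:`annotate` lines up its plain output.
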
|
422 | 422 | @command('archive', |
|
423 | 423 | [('', 'no-decode', None, _('do not pass files through decoders')), |
|
424 | 424 | ('p', 'prefix', '', _('directory prefix for files in archive'), |
|
425 | 425 | _('PREFIX')), |
|
426 | 426 | ('r', 'rev', '', _('revision to distribute'), _('REV')), |
|
427 | 427 | ('t', 'type', '', _('type of distribution to create'), _('TYPE')), |
|
428 | 428 | ] + subrepoopts + walkopts, |
|
429 | 429 | _('[OPTION]... DEST')) |
|
430 | 430 | def archive(ui, repo, dest, **opts): |
|
431 | 431 | '''create an unversioned archive of a repository revision |
|
432 | 432 | |
|
433 | 433 | By default, the revision used is the parent of the working |
|
434 | 434 | directory; use -r/--rev to specify a different revision. |
|
435 | 435 | |
|
436 | 436 | The archive type is automatically detected based on file |
|
437 | 437 | extension (to override, use -t/--type). |
|
438 | 438 | |
|
439 | 439 | .. container:: verbose |
|
440 | 440 | |
|
441 | 441 | Examples: |
|
442 | 442 | |
|
443 | 443 | - create a zip file containing the 1.0 release:: |
|
444 | 444 | |
|
445 | 445 | hg archive -r 1.0 project-1.0.zip |
|
446 | 446 | |
|
447 | 447 | - create a tarball excluding .hg files:: |
|
448 | 448 | |
|
449 | 449 | hg archive project.tar.gz -X ".hg*" |
|
450 | 450 | |
|
451 | 451 | Valid types are: |
|
452 | 452 | |
|
453 | 453 | :``files``: a directory full of files (default) |
|
454 | 454 | :``tar``: tar archive, uncompressed |
|
455 | 455 | :``tbz2``: tar archive, compressed using bzip2 |
|
456 | 456 | :``tgz``: tar archive, compressed using gzip |
|
457 | 457 | :``uzip``: zip archive, uncompressed |
|
458 | 458 | :``zip``: zip archive, compressed using deflate |
|
459 | 459 | |
|
460 | 460 | The exact name of the destination archive or directory is given |
|
461 | 461 | using a format string; see :hg:`help export` for details. |
|
462 | 462 | |
|
463 | 463 | Each member added to an archive file has a directory prefix |
|
464 | 464 | prepended. Use -p/--prefix to specify a format string for the |
|
465 | 465 | prefix. The default is the basename of the archive, with suffixes |
|
466 | 466 | removed. |
|
467 | 467 | |
|
468 | 468 | Returns 0 on success. |
|
469 | 469 | ''' |
|
470 | 470 | |
|
471 | 471 | opts = pycompat.byteskwargs(opts) |
|
472 | 472 | rev = opts.get('rev') |
|
473 | 473 | if rev: |
|
474 | 474 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
475 | 475 | ctx = scmutil.revsingle(repo, rev) |
|
476 | 476 | if not ctx: |
|
477 | 477 | raise error.Abort(_('no working directory: please specify a revision')) |
|
478 | 478 | node = ctx.node() |
|
479 | 479 | dest = cmdutil.makefilename(ctx, dest) |
|
480 | 480 | if os.path.realpath(dest) == repo.root: |
|
481 | 481 | raise error.Abort(_('repository root cannot be destination')) |
|
482 | 482 | |
|
483 | 483 | kind = opts.get('type') or archival.guesskind(dest) or 'files' |
|
484 | 484 | prefix = opts.get('prefix') |
|
485 | 485 | |
|
486 | 486 | if dest == '-': |
|
487 | 487 | if kind == 'files': |
|
488 | 488 | raise error.Abort(_('cannot archive plain files to stdout')) |
|
489 | 489 | dest = cmdutil.makefileobj(ctx, dest) |
|
490 | 490 | if not prefix: |
|
491 | 491 | prefix = os.path.basename(repo.root) + '-%h' |
|
492 | 492 | |
|
493 | 493 | prefix = cmdutil.makefilename(ctx, prefix) |
|
494 | 494 | match = scmutil.match(ctx, [], opts) |
|
495 | 495 | archival.archive(repo, dest, node, kind, not opts.get('no_decode'), |
|
496 | 496 | match, prefix, subrepos=opts.get('subrepos')) |
|
497 | 497 | |
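
Both DEST and the --prefix value are run through cmdutil.makefilename above, so they accept the same escapes as :hg:`export`. An illustrative invocation (the 1.0 tag and filenames are assumptions, not from the source)::

    hg archive -r 1.0 -t tgz -p '%b-%r' 'snapshot-%h.tar.gz'

This names the tarball after the short changeset hash and prefixes every archive member with '<repo basename>-<zero-padded rev>'.
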
|
498 | 498 | @command('backout', |
|
499 | 499 | [('', 'merge', None, _('merge with old dirstate parent after backout')), |
|
500 | 500 | ('', 'commit', None, |
|
501 | 501 | _('commit if no conflicts were encountered (DEPRECATED)')), |
|
502 | 502 | ('', 'no-commit', None, _('do not commit')), |
|
503 | 503 | ('', 'parent', '', |
|
504 | 504 | _('parent to choose when backing out merge (DEPRECATED)'), _('REV')), |
|
505 | 505 | ('r', 'rev', '', _('revision to backout'), _('REV')), |
|
506 | 506 | ('e', 'edit', False, _('invoke editor on commit messages')), |
|
507 | 507 | ] + mergetoolopts + walkopts + commitopts + commitopts2, |
|
508 | 508 | _('[OPTION]... [-r] REV')) |
|
509 | 509 | def backout(ui, repo, node=None, rev=None, **opts): |
|
510 | 510 | '''reverse effect of earlier changeset |
|
511 | 511 | |
|
512 | 512 | Prepare a new changeset with the effect of REV undone in the |
|
513 | 513 | current working directory. If no conflicts were encountered, |
|
514 | 514 | it will be committed immediately. |
|
515 | 515 | |
|
516 | 516 | If REV is the parent of the working directory, then this new changeset |
|
517 | 517 | is committed automatically (unless --no-commit is specified). |
|
518 | 518 | |
|
519 | 519 | .. note:: |
|
520 | 520 | |
|
521 | 521 | :hg:`backout` cannot be used to fix either an unwanted or |
|
522 | 522 | incorrect merge. |
|
523 | 523 | |
|
524 | 524 | .. container:: verbose |
|
525 | 525 | |
|
526 | 526 | Examples: |
|
527 | 527 | |
|
528 | 528 | - Reverse the effect of the parent of the working directory. |
|
529 | 529 | This backout will be committed immediately:: |
|
530 | 530 | |
|
531 | 531 | hg backout -r . |
|
532 | 532 | |
|
533 | 533 | - Reverse the effect of previous bad revision 23:: |
|
534 | 534 | |
|
535 | 535 | hg backout -r 23 |
|
536 | 536 | |
|
537 | 537 | - Reverse the effect of previous bad revision 23 and |
|
538 | 538 | leave changes uncommitted:: |
|
539 | 539 | |
|
540 | 540 | hg backout -r 23 --no-commit |
|
541 | 541 | hg commit -m "Backout revision 23" |
|
542 | 542 | |
|
543 | 543 | By default, the pending changeset will have one parent, |
|
544 | 544 | maintaining a linear history. With --merge, the pending |
|
545 | 545 | changeset will instead have two parents: the old parent of the |
|
546 | 546 | working directory and a new child of REV that simply undoes REV. |
|
547 | 547 | |
|
548 | 548 | Before version 1.7, the behavior without --merge was equivalent |
|
549 | 549 | to specifying --merge followed by :hg:`update --clean .` to |
|
550 | 550 | cancel the merge and leave the child of REV as a head to be |
|
551 | 551 | merged separately. |
|
552 | 552 | |
|
553 | 553 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
554 | 554 | |
|
555 | 555 | See :hg:`help revert` for a way to restore files to the state |
|
556 | 556 | of another revision. |
|
557 | 557 | |
|
558 | 558 | Returns 0 on success, 1 if nothing to backout or there are unresolved |
|
559 | 559 | files. |
|
560 | 560 | ''' |
|
561 | 561 | wlock = lock = None |
|
562 | 562 | try: |
|
563 | 563 | wlock = repo.wlock() |
|
564 | 564 | lock = repo.lock() |
|
565 | 565 | return _dobackout(ui, repo, node, rev, **opts) |
|
566 | 566 | finally: |
|
567 | 567 | release(lock, wlock) |
|
568 | 568 | |
|
569 | 569 | def _dobackout(ui, repo, node=None, rev=None, **opts): |
|
570 | 570 | opts = pycompat.byteskwargs(opts) |
|
571 | 571 | if opts.get('commit') and opts.get('no_commit'): |
|
572 | 572 | raise error.Abort(_("cannot use --commit with --no-commit")) |
|
573 | 573 | if opts.get('merge') and opts.get('no_commit'): |
|
574 | 574 | raise error.Abort(_("cannot use --merge with --no-commit")) |
|
575 | 575 | |
|
576 | 576 | if rev and node: |
|
577 | 577 | raise error.Abort(_("please specify just one revision")) |
|
578 | 578 | |
|
579 | 579 | if not rev: |
|
580 | 580 | rev = node |
|
581 | 581 | |
|
582 | 582 | if not rev: |
|
583 | 583 | raise error.Abort(_("please specify a revision to backout")) |
|
584 | 584 | |
|
585 | 585 | date = opts.get('date') |
|
586 | 586 | if date: |
|
587 | 587 | opts['date'] = util.parsedate(date) |
|
588 | 588 | |
|
589 | 589 | cmdutil.checkunfinished(repo) |
|
590 | 590 | cmdutil.bailifchanged(repo) |
|
591 | 591 | node = scmutil.revsingle(repo, rev).node() |
|
592 | 592 | |
|
593 | 593 | op1, op2 = repo.dirstate.parents() |
|
594 | 594 | if not repo.changelog.isancestor(node, op1): |
|
595 | 595 | raise error.Abort(_('cannot backout change that is not an ancestor')) |
|
596 | 596 | |
|
597 | 597 | p1, p2 = repo.changelog.parents(node) |
|
598 | 598 | if p1 == nullid: |
|
599 | 599 | raise error.Abort(_('cannot backout a change with no parents')) |
|
600 | 600 | if p2 != nullid: |
|
601 | 601 | if not opts.get('parent'): |
|
602 | 602 | raise error.Abort(_('cannot backout a merge changeset')) |
|
603 | 603 | p = repo.lookup(opts['parent']) |
|
604 | 604 | if p not in (p1, p2): |
|
605 | 605 | raise error.Abort(_('%s is not a parent of %s') % |
|
606 | 606 | (short(p), short(node))) |
|
607 | 607 | parent = p |
|
608 | 608 | else: |
|
609 | 609 | if opts.get('parent'): |
|
610 | 610 | raise error.Abort(_('cannot use --parent on non-merge changeset')) |
|
611 | 611 | parent = p1 |
|
612 | 612 | |
|
613 | 613 | # the backout should appear on the same branch |
|
614 | 614 | branch = repo.dirstate.branch() |
|
615 | 615 | bheads = repo.branchheads(branch) |
|
616 | 616 | rctx = scmutil.revsingle(repo, hex(parent)) |
|
617 | 617 | if not opts.get('merge') and op1 != node: |
|
618 | 618 | dsguard = dirstateguard.dirstateguard(repo, 'backout') |
|
619 | 619 | try: |
|
620 | 620 | ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), |
|
621 | 621 | 'backout') |
|
622 | 622 | stats = mergemod.update(repo, parent, True, True, node, False) |
|
623 | 623 | repo.setparents(op1, op2) |
|
624 | 624 | dsguard.close() |
|
625 | 625 | hg._showstats(repo, stats) |
|
626 | 626 | if stats[3]: |
|
627 | 627 | repo.ui.status(_("use 'hg resolve' to retry unresolved " |
|
628 | 628 | "file merges\n")) |
|
629 | 629 | return 1 |
|
630 | 630 | finally: |
|
631 | 631 | ui.setconfig('ui', 'forcemerge', '', '') |
|
632 | 632 | lockmod.release(dsguard) |
|
633 | 633 | else: |
|
634 | 634 | hg.clean(repo, node, show_stats=False) |
|
635 | 635 | repo.dirstate.setbranch(branch) |
|
636 | 636 | cmdutil.revert(ui, repo, rctx, repo.dirstate.parents()) |
|
637 | 637 | |
|
638 | 638 | if opts.get('no_commit'): |
|
639 | 639 | msg = _("changeset %s backed out, " |
|
640 | 640 | "don't forget to commit.\n") |
|
641 | 641 | ui.status(msg % short(node)) |
|
642 | 642 | return 0 |
|
643 | 643 | |
|
644 | 644 | def commitfunc(ui, repo, message, match, opts): |
|
645 | 645 | editform = 'backout' |
|
646 | 646 | e = cmdutil.getcommiteditor(editform=editform, |
|
647 | 647 | **pycompat.strkwargs(opts)) |
|
648 | 648 | if not message: |
|
649 | 649 | # we don't translate commit messages |
|
650 | 650 | message = "Backed out changeset %s" % short(node) |
|
651 | 651 | e = cmdutil.getcommiteditor(edit=True, editform=editform) |
|
652 | 652 | return repo.commit(message, opts.get('user'), opts.get('date'), |
|
653 | 653 | match, editor=e) |
|
654 | 654 | newnode = cmdutil.commit(ui, repo, commitfunc, [], opts) |
|
655 | 655 | if not newnode: |
|
656 | 656 | ui.status(_("nothing changed\n")) |
|
657 | 657 | return 1 |
|
658 | 658 | cmdutil.commitstatus(repo, newnode, branch, bheads) |
|
659 | 659 | |
|
660 | 660 | def nice(node): |
|
661 | 661 | return '%d:%s' % (repo.changelog.rev(node), short(node)) |
|
662 | 662 | ui.status(_('changeset %s backs out changeset %s\n') % |
|
663 | 663 | (nice(repo.changelog.tip()), nice(node))) |
|
664 | 664 | if opts.get('merge') and op1 != node: |
|
665 | 665 | hg.clean(repo, op1, show_stats=False) |
|
666 | 666 | ui.status(_('merging with changeset %s\n') |
|
667 | 667 | % nice(repo.changelog.tip())) |
|
668 | 668 | try: |
|
669 | 669 | ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), |
|
670 | 670 | 'backout') |
|
671 | 671 | return hg.merge(repo, hex(repo.changelog.tip())) |
|
672 | 672 | finally: |
|
673 | 673 | ui.setconfig('ui', 'forcemerge', '', '') |
|
674 | 674 | return 0 |
|
675 | 675 | |
|
676 | 676 | @command('bisect', |
|
677 | 677 | [('r', 'reset', False, _('reset bisect state')), |
|
678 | 678 | ('g', 'good', False, _('mark changeset good')), |
|
679 | 679 | ('b', 'bad', False, _('mark changeset bad')), |
|
680 | 680 | ('s', 'skip', False, _('skip testing changeset')), |
|
681 | 681 | ('e', 'extend', False, _('extend the bisect range')), |
|
682 | 682 | ('c', 'command', '', _('use command to check changeset state'), _('CMD')), |
|
683 | 683 | ('U', 'noupdate', False, _('do not update to target'))], |
|
684 | 684 | _("[-gbsr] [-U] [-c CMD] [REV]")) |
|
685 | 685 | def bisect(ui, repo, rev=None, extra=None, command=None, |
|
686 | 686 | reset=None, good=None, bad=None, skip=None, extend=None, |
|
687 | 687 | noupdate=None): |
|
688 | 688 | """subdivision search of changesets |
|
689 | 689 | |
|
690 | 690 | This command helps to find changesets which introduce problems. To |
|
691 | 691 | use, mark the earliest changeset you know exhibits the problem as |
|
692 | 692 | bad, then mark the latest changeset which is free from the problem |
|
693 | 693 | as good. Bisect will update your working directory to a revision |
|
694 | 694 | for testing (unless the -U/--noupdate option is specified). Once |
|
695 | 695 | you have performed tests, mark the working directory as good or |
|
696 | 696 | bad, and bisect will either update to another candidate changeset |
|
697 | 697 | or announce that it has found the bad revision. |
|
698 | 698 | |
|
699 | 699 | As a shortcut, you can also use the revision argument to mark a |
|
700 | 700 | revision as good or bad without checking it out first. |
|
701 | 701 | |
|
702 | 702 | If you supply a command, it will be used for automatic bisection. |
|
703 | 703 | The environment variable HG_NODE will contain the ID of the |
|
704 | 704 | changeset being tested. The exit status of the command will be |
|
705 | 705 | used to mark revisions as good or bad: status 0 means good, 125 |
|
706 | 706 | means to skip the revision, 127 (command not found) will abort the |
|
707 | 707 | bisection, and any other non-zero exit status means the revision |
|
708 | 708 | is bad. |
|
709 | 709 | |
|
710 | 710 | .. container:: verbose |
|
711 | 711 | |
|
712 | 712 | Some examples: |
|
713 | 713 | |
|
714 | 714 | - start a bisection with known bad revision 34, and good revision 12:: |
|
715 | 715 | |
|
716 | 716 | hg bisect --bad 34 |
|
717 | 717 | hg bisect --good 12 |
|
718 | 718 | |
|
719 | 719 | - advance the current bisection by marking current revision as good or |
|
720 | 720 | bad:: |
|
721 | 721 | |
|
722 | 722 | hg bisect --good |
|
723 | 723 | hg bisect --bad |
|
724 | 724 | |
|
725 | 725 | - mark the current revision, or a known revision, to be skipped (e.g. if |
|
726 | 726 | that revision is not usable because of another issue):: |
|
727 | 727 | |
|
728 | 728 | hg bisect --skip |
|
729 | 729 | hg bisect --skip 23 |
|
730 | 730 | |
|
731 | 731 | - skip all revisions that do not touch directories ``foo`` or ``bar``:: |
|
732 | 732 | |
|
733 | 733 | hg bisect --skip "!( file('path:foo') & file('path:bar') )" |
|
734 | 734 | |
|
735 | 735 | - forget the current bisection:: |
|
736 | 736 | |
|
737 | 737 | hg bisect --reset |
|
738 | 738 | |
|
739 | 739 | - use 'make && make tests' to automatically find the first broken |
|
740 | 740 | revision:: |
|
741 | 741 | |
|
742 | 742 | hg bisect --reset |
|
743 | 743 | hg bisect --bad 34 |
|
744 | 744 | hg bisect --good 12 |
|
745 | 745 | hg bisect --command "make && make tests" |
|
746 | 746 | |
|
747 | 747 | - see all changesets whose states are already known in the current |
|
748 | 748 | bisection:: |
|
749 | 749 | |
|
750 | 750 | hg log -r "bisect(pruned)" |
|
751 | 751 | |
|
752 | 752 | - see the changeset currently being bisected (especially useful |
|
753 | 753 | if running with -U/--noupdate):: |
|
754 | 754 | |
|
755 | 755 | hg log -r "bisect(current)" |
|
756 | 756 | |
|
757 | 757 | - see all changesets that took part in the current bisection:: |
|
758 | 758 | |
|
759 | 759 | hg log -r "bisect(range)" |
|
760 | 760 | |
|
761 | 761 | - you can even get a nice graph:: |
|
762 | 762 | |
|
763 | 763 | hg log --graph -r "bisect(range)" |
|
764 | 764 | |
|
765 | 765 | See :hg:`help revisions.bisect` for more about the `bisect()` predicate. |
|
766 | 766 | |
|
767 | 767 | Returns 0 on success. |
|
768 | 768 | """ |
|
769 | 769 | # backward compatibility |
|
770 | 770 | if rev in "good bad reset init".split(): |
|
771 | 771 | ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n")) |
|
772 | 772 | cmd, rev, extra = rev, extra, None |
|
773 | 773 | if cmd == "good": |
|
774 | 774 | good = True |
|
775 | 775 | elif cmd == "bad": |
|
776 | 776 | bad = True |
|
777 | 777 | else: |
|
778 | 778 | reset = True |
|
779 | 779 | elif extra: |
|
780 | 780 | raise error.Abort(_('incompatible arguments')) |
|
781 | 781 | |
|
782 | 782 | incompatibles = { |
|
783 | 783 | '--bad': bad, |
|
784 | 784 | '--command': bool(command), |
|
785 | 785 | '--extend': extend, |
|
786 | 786 | '--good': good, |
|
787 | 787 | '--reset': reset, |
|
788 | 788 | '--skip': skip, |
|
789 | 789 | } |
|
790 | 790 | |
|
791 | 791 | enabled = [x for x in incompatibles if incompatibles[x]] |
|
792 | 792 | |
|
793 | 793 | if len(enabled) > 1: |
|
794 | 794 | raise error.Abort(_('%s and %s are incompatible') % |
|
795 | 795 | tuple(sorted(enabled)[0:2])) |
|
796 | 796 | |
|
797 | 797 | if reset: |
|
798 | 798 | hbisect.resetstate(repo) |
|
799 | 799 | return |
|
800 | 800 | |
|
801 | 801 | state = hbisect.load_state(repo) |
|
802 | 802 | |
|
803 | 803 | # update state |
|
804 | 804 | if good or bad or skip: |
|
805 | 805 | if rev: |
|
806 | 806 | nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])] |
|
807 | 807 | else: |
|
808 | 808 | nodes = [repo.lookup('.')] |
|
809 | 809 | if good: |
|
810 | 810 | state['good'] += nodes |
|
811 | 811 | elif bad: |
|
812 | 812 | state['bad'] += nodes |
|
813 | 813 | elif skip: |
|
814 | 814 | state['skip'] += nodes |
|
815 | 815 | hbisect.save_state(repo, state) |
|
816 | 816 | if not (state['good'] and state['bad']): |
|
817 | 817 | return |
|
818 | 818 | |
|
819 | 819 | def mayupdate(repo, node, show_stats=True): |
|
820 | 820 | """commonly used update sequence""" |
|
821 | 821 | if noupdate: |
|
822 | 822 | return |
|
823 | 823 | cmdutil.checkunfinished(repo) |
|
824 | 824 | cmdutil.bailifchanged(repo) |
|
825 | 825 | return hg.clean(repo, node, show_stats=show_stats) |
|
826 | 826 | |
|
827 | 827 | displayer = logcmdutil.changesetdisplayer(ui, repo, {}) |
|
828 | 828 | |
|
829 | 829 | if command: |
|
830 | 830 | changesets = 1 |
|
831 | 831 | if noupdate: |
|
832 | 832 | try: |
|
833 | 833 | node = state['current'][0] |
|
834 | 834 | except LookupError: |
|
835 | 835 | raise error.Abort(_('current bisect revision is unknown - ' |
|
836 | 836 | 'start a new bisect to fix')) |
|
837 | 837 | else: |
|
838 | 838 | node, p2 = repo.dirstate.parents() |
|
839 | 839 | if p2 != nullid: |
|
840 | 840 | raise error.Abort(_('current bisect revision is a merge')) |
|
841 | 841 | if rev: |
|
842 | 842 | node = repo[scmutil.revsingle(repo, rev, node)].node() |
|
843 | 843 | try: |
|
844 | 844 | while changesets: |
|
845 | 845 | # update state |
|
846 | 846 | state['current'] = [node] |
|
847 | 847 | hbisect.save_state(repo, state) |
|
848 | 848 | status = ui.system(command, environ={'HG_NODE': hex(node)}, |
|
849 | 849 | blockedtag='bisect_check') |
|
850 | 850 | if status == 125: |
|
851 | 851 | transition = "skip" |
|
852 | 852 | elif status == 0: |
|
853 | 853 | transition = "good" |
|
854 | 854 | elif status == 127: |
|
855 | 855 | raise error.Abort(_("failed to execute %s") % command) |
|
856 | 856 | # status < 0 means process was killed |
|
857 | 857 | elif status < 0: |
|
858 | 858 | raise error.Abort(_("%s killed") % command) |
|
859 | 859 | else: |
|
860 | 860 | transition = "bad" |
|
861 | 861 | state[transition].append(node) |
|
862 | 862 | ctx = repo[node] |
|
863 | 863 | ui.status(_('changeset %d:%s: %s\n') % (ctx.rev(), ctx, |
|
864 | 864 | transition)) |
|
865 | 865 | hbisect.checkstate(state) |
|
866 | 866 | # bisect |
|
867 | 867 | nodes, changesets, bgood = hbisect.bisect(repo, state) |
|
868 | 868 | # update to next check |
|
869 | 869 | node = nodes[0] |
|
870 | 870 | mayupdate(repo, node, show_stats=False) |
|
871 | 871 | finally: |
|
872 | 872 | state['current'] = [node] |
|
873 | 873 | hbisect.save_state(repo, state) |
|
874 | 874 | hbisect.printresult(ui, repo, state, displayer, nodes, bgood) |
|
875 | 875 | return |
|
876 | 876 | |
|
877 | 877 | hbisect.checkstate(state) |
|
878 | 878 | |
|
879 | 879 | # actually bisect |
|
880 | 880 | nodes, changesets, good = hbisect.bisect(repo, state) |
|
881 | 881 | if extend: |
|
882 | 882 | if not changesets: |
|
883 | 883 | extendnode = hbisect.extendrange(repo, state, nodes, good) |
|
884 | 884 | if extendnode is not None: |
|
885 | 885 | ui.write(_("Extending search to changeset %d:%s\n") |
|
886 | 886 | % (extendnode.rev(), extendnode)) |
|
887 | 887 | state['current'] = [extendnode.node()] |
|
888 | 888 | hbisect.save_state(repo, state) |
|
889 | 889 | return mayupdate(repo, extendnode.node()) |
|
890 | 890 | raise error.Abort(_("nothing to extend")) |
|
891 | 891 | |
|
892 | 892 | if changesets == 0: |
|
893 | 893 | hbisect.printresult(ui, repo, state, displayer, nodes, good) |
|
894 | 894 | else: |
|
895 | 895 | assert len(nodes) == 1 # only a single node can be tested next |
|
896 | 896 | node = nodes[0] |
|
897 | 897 | # compute the approximate number of remaining tests |
|
898 | 898 | tests, size = 0, 2 |
|
899 | 899 | while size <= changesets: |
|
900 | 900 | tests, size = tests + 1, size * 2 |
|
901 | 901 | rev = repo.changelog.rev(node) |
|
902 | 902 | ui.write(_("Testing changeset %d:%s " |
|
903 | 903 | "(%d changesets remaining, ~%d tests)\n") |
|
904 | 904 | % (rev, short(node), changesets, tests)) |
|
905 | 905 | state['current'] = [node] |
|
906 | 906 | hbisect.save_state(repo, state) |
|
907 | 907 | return mayupdate(repo, node) |
|
908 | 908 | |
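
The exit-status protocol handled above (0 good, 125 skip, 127 abort, any other non-zero bad) makes it straightforward to write a check script for -c/--command. A hedged sketch, assuming a make-based test suite (the script name and make targets are illustrative, not from the source)::

    #!/usr/bin/env python
    # check.py - test script for 'hg bisect --command ./check.py'.
    # Exit 0 for good, 125 to skip, anything else for bad; exiting 127
    # would abort the whole bisection, matching the protocol above.
    import os
    import subprocess
    import sys

    print('testing %s' % os.environ.get('HG_NODE'))  # changeset under test
    try:
        subprocess.check_call(['make'])
    except subprocess.CalledProcessError:
        sys.exit(125)  # does not even build: skip, revision is untestable
    sys.exit(0 if subprocess.call(['make', 'tests']) == 0 else 1)
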
|
909 | 909 | @command('bookmarks|bookmark', |
|
910 | 910 | [('f', 'force', False, _('force')), |
|
911 | 911 | ('r', 'rev', '', _('revision for bookmark action'), _('REV')), |
|
912 | 912 | ('d', 'delete', False, _('delete a given bookmark')), |
|
913 | 913 | ('m', 'rename', '', _('rename a given bookmark'), _('OLD')), |
|
914 | 914 | ('i', 'inactive', False, _('mark a bookmark inactive')), |
|
915 | 915 | ] + formatteropts, |
|
916 | 916 | _('hg bookmarks [OPTIONS]... [NAME]...')) |
|
917 | 917 | def bookmark(ui, repo, *names, **opts): |
|
918 | 918 | '''create a new bookmark or list existing bookmarks |
|
919 | 919 | |
|
920 | 920 | Bookmarks are labels on changesets to help track lines of development. |
|
921 | 921 | Bookmarks are unversioned and can be moved, renamed and deleted. |
|
922 | 922 | Deleting or moving a bookmark has no effect on the associated changesets. |
|
923 | 923 | |
|
924 | 924 | Creating or updating to a bookmark causes it to be marked as 'active'. |
|
925 | 925 | The active bookmark is indicated with a '*'. |
|
926 | 926 | When a commit is made, the active bookmark will advance to the new commit. |
|
927 | 927 | A plain :hg:`update` will also advance an active bookmark, if possible. |
|
928 | 928 | Updating away from a bookmark will cause it to be deactivated. |
|
929 | 929 | |
|
930 | 930 | Bookmarks can be pushed and pulled between repositories (see |
|
931 | 931 | :hg:`help push` and :hg:`help pull`). If a shared bookmark has |
|
932 | 932 | diverged, a new 'divergent bookmark' of the form 'name@path' will |
|
933 | 933 | be created. Using :hg:`merge` will resolve the divergence. |
|
934 | 934 | |
|
935 | 935 | Specifying a bookmark as '.' with the -m or -d option is equivalent to |
|
936 | 936 | specifying the active bookmark's name. |
|
937 | 937 | |
|
938 | 938 | A bookmark named '@' has the special property that :hg:`clone` will |
|
939 | 939 | check it out by default if it exists. |
|
940 | 940 | |
|
941 | 941 | .. container:: verbose |
|
942 | 942 | |
|
943 | 943 | Examples: |
|
944 | 944 | |
|
945 | 945 | - create an active bookmark for a new line of development:: |
|
946 | 946 | |
|
947 | 947 | hg book new-feature |
|
948 | 948 | |
|
949 | 949 | - create an inactive bookmark as a place marker:: |
|
950 | 950 | |
|
951 | 951 | hg book -i reviewed |
|
952 | 952 | |
|
953 | 953 | - create an inactive bookmark on another changeset:: |
|
954 | 954 | |
|
955 | 955 | hg book -r .^ tested |
|
956 | 956 | |
|
957 | 957 | - rename bookmark turkey to dinner:: |
|
958 | 958 | |
|
959 | 959 | hg book -m turkey dinner |
|
960 | 960 | |
|
961 | 961 | - move the '@' bookmark from another branch:: |
|
962 | 962 | |
|
963 | 963 | hg book -f @ |
|
964 | 964 | ''' |
|
965 | 965 | force = opts.get(r'force') |
|
966 | 966 | rev = opts.get(r'rev') |
|
967 | 967 | delete = opts.get(r'delete') |
|
968 | 968 | rename = opts.get(r'rename') |
|
969 | 969 | inactive = opts.get(r'inactive') |
|
970 | 970 | |
|
971 | 971 | if delete and rename: |
|
972 | 972 | raise error.Abort(_("--delete and --rename are incompatible")) |
|
973 | 973 | if delete and rev: |
|
974 | 974 | raise error.Abort(_("--rev is incompatible with --delete")) |
|
975 | 975 | if rename and rev: |
|
976 | 976 | raise error.Abort(_("--rev is incompatible with --rename")) |
|
977 | 977 | if not names and (delete or rev): |
|
978 | 978 | raise error.Abort(_("bookmark name required")) |
|
979 | 979 | |
|
980 | 980 | if delete or rename or names or inactive: |
|
981 | 981 | with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr: |
|
982 | 982 | if delete: |
|
983 | 983 | names = pycompat.maplist(repo._bookmarks.expandname, names) |
|
984 | 984 | bookmarks.delete(repo, tr, names) |
|
985 | 985 | elif rename: |
|
986 | 986 | if not names: |
|
987 | 987 | raise error.Abort(_("new bookmark name required")) |
|
988 | 988 | elif len(names) > 1: |
|
989 | 989 | raise error.Abort(_("only one new bookmark name allowed")) |
|
990 | 990 | rename = repo._bookmarks.expandname(rename) |
|
991 | 991 | bookmarks.rename(repo, tr, rename, names[0], force, inactive) |
|
992 | 992 | elif names: |
|
993 | 993 | bookmarks.addbookmarks(repo, tr, names, rev, force, inactive) |
|
994 | 994 | elif inactive: |
|
995 | 995 | if len(repo._bookmarks) == 0: |
|
996 | 996 | ui.status(_("no bookmarks set\n")) |
|
997 | 997 | elif not repo._activebookmark: |
|
998 | 998 | ui.status(_("no active bookmark\n")) |
|
999 | 999 | else: |
|
1000 | 1000 | bookmarks.deactivate(repo) |
|
1001 | 1001 | else: # show bookmarks |
|
1002 | 1002 | bookmarks.printbookmarks(ui, repo, **opts) |
|
1003 | 1003 | |
|
1004 | 1004 | @command('branch', |
|
1005 | 1005 | [('f', 'force', None, |
|
1006 | 1006 | _('set branch name even if it shadows an existing branch')), |
|
1007 | 1007 | ('C', 'clean', None, _('reset branch name to parent branch name')), |
|
1008 | 1008 | ('r', 'rev', [], _('change branches of the given revs (EXPERIMENTAL)')), |
|
1009 | 1009 | ], |
|
1010 | 1010 | _('[-fC] [NAME]')) |
|
1011 | 1011 | def branch(ui, repo, label=None, **opts): |
|
1012 | 1012 | """set or show the current branch name |
|
1013 | 1013 | |
|
1014 | 1014 | .. note:: |
|
1015 | 1015 | |
|
1016 | 1016 | Branch names are permanent and global. Use :hg:`bookmark` to create a |
|
1017 | 1017 | light-weight bookmark instead. See :hg:`help glossary` for more |
|
1018 | 1018 | information about named branches and bookmarks. |
|
1019 | 1019 | |
|
1020 | 1020 | With no argument, show the current branch name. With one argument, |
|
1021 | 1021 | set the working directory branch name (the branch will not exist |
|
1022 | 1022 | in the repository until the next commit). Standard practice |
|
1023 | 1023 | recommends that primary development take place on the 'default' |
|
1024 | 1024 | branch. |
|
1025 | 1025 | |
|
1026 | 1026 | Unless -f/--force is specified, branch will not let you set a |
|
1027 | 1027 | branch name that already exists. |
|
1028 | 1028 | |
|
1029 | 1029 | Use -C/--clean to reset the working directory branch to that of |
|
1030 | 1030 | the parent of the working directory, negating a previous branch |
|
1031 | 1031 | change. |
|
1032 | 1032 | |
|
1033 | 1033 | Use the command :hg:`update` to switch to an existing branch. Use |
|
1034 | 1034 | :hg:`commit --close-branch` to mark this branch head as closed. |
|
1035 | 1035 | When all heads of a branch are closed, the branch will be |
|
1036 | 1036 | considered closed. |
|
1037 | 1037 | |
|
1038 | 1038 | Returns 0 on success. |
|
1039 | 1039 | """ |
|
1040 | 1040 | opts = pycompat.byteskwargs(opts) |
|
1041 | 1041 | revs = opts.get('rev') |
|
1042 | 1042 | if label: |
|
1043 | 1043 | label = label.strip() |
|
1044 | 1044 | |
|
1045 | 1045 | if not opts.get('clean') and not label: |
|
1046 | 1046 | if revs: |
|
1047 | 1047 | raise error.Abort(_("no branch name specified for the revisions")) |
|
1048 | 1048 | ui.write("%s\n" % repo.dirstate.branch()) |
|
1049 | 1049 | return |
|
1050 | 1050 | |
|
1051 | 1051 | with repo.wlock(): |
|
1052 | 1052 | if opts.get('clean'): |
|
1053 | 1053 | label = repo[None].p1().branch() |
|
1054 | 1054 | repo.dirstate.setbranch(label) |
|
1055 | 1055 | ui.status(_('reset working directory to branch %s\n') % label) |
|
1056 | 1056 | elif label: |
|
1057 | 1057 | |
|
1058 | 1058 | scmutil.checknewlabel(repo, label, 'branch') |
|
1059 | 1059 | if revs: |
|
1060 | 1060 | return cmdutil.changebranch(ui, repo, revs, label) |
|
1061 | 1061 | |
|
1062 | 1062 | if not opts.get('force') and label in repo.branchmap(): |
|
1063 | 1063 | if label not in [p.branch() for p in repo[None].parents()]: |
|
1064 | 1064 | raise error.Abort(_('a branch of the same name already' |
|
1065 | 1065 | ' exists'), |
|
1066 | 1066 | # i18n: "it" refers to an existing branch |
|
1067 | 1067 | hint=_("use 'hg update' to switch to it")) |
|
1068 | 1068 | |
|
1069 | 1069 | repo.dirstate.setbranch(label) |
|
1070 | 1070 | ui.status(_('marked working directory as branch %s\n') % label) |
|
1071 | 1071 | |
|
1072 | 1072 | # find any open named branches aside from default |
|
1073 | 1073 | others = [n for n, h, t, c in repo.branchmap().iterbranches() |
|
1074 | 1074 | if n != "default" and not c] |
|
1075 | 1075 | if not others: |
|
1076 | 1076 | ui.status(_('(branches are permanent and global, ' |
|
1077 | 1077 | 'did you want a bookmark?)\n')) |
|
1078 | 1078 | |
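
A short illustrative session (the branch name is an assumption, not from the source)::

    hg branch stable                      # mark the working directory
    hg commit -m 'start stable branch'    # the branch now exists in history
    hg update default                     # switch back to an existing branch
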
|
1079 | 1079 | @command('branches', |
|
1080 | 1080 | [('a', 'active', False, |
|
1081 | 1081 | _('show only branches that have unmerged heads (DEPRECATED)')), |
|
1082 | 1082 | ('c', 'closed', False, _('show normal and closed branches')), |
|
1083 | 1083 | ] + formatteropts, |
|
1084 | 1084 | _('[-c]'), cmdtype=readonly) |
|
1085 | 1085 | def branches(ui, repo, active=False, closed=False, **opts): |
|
1086 | 1086 | """list repository named branches |
|
1087 | 1087 | |
|
1088 | 1088 | List the repository's named branches, indicating which ones are |
|
1089 | 1089 | inactive. If -c/--closed is specified, also list branches which have |
|
1090 | 1090 | been marked closed (see :hg:`commit --close-branch`). |
|
1091 | 1091 | |
|
1092 | 1092 | Use the command :hg:`update` to switch to an existing branch. |
|
1093 | 1093 | |
|
1094 | 1094 | Returns 0. |
|
1095 | 1095 | """ |
|
1096 | 1096 | |
|
1097 | 1097 | opts = pycompat.byteskwargs(opts) |
|
1098 | 1098 | ui.pager('branches') |
|
1099 | 1099 | fm = ui.formatter('branches', opts) |
|
1100 | 1100 | hexfunc = fm.hexfunc |
|
1101 | 1101 | |
|
1102 | 1102 | allheads = set(repo.heads()) |
|
1103 | 1103 | branches = [] |
|
1104 | 1104 | for tag, heads, tip, isclosed in repo.branchmap().iterbranches(): |
|
1105 | 1105 | isactive = False |
|
1106 | 1106 | if not isclosed: |
|
1107 | 1107 | openheads = set(repo.branchmap().iteropen(heads)) |
|
1108 | 1108 | isactive = bool(openheads & allheads) |
|
1109 | 1109 | branches.append((tag, repo[tip], isactive, not isclosed)) |
|
1110 | 1110 | branches.sort(key=lambda i: (i[2], i[1].rev(), i[0], i[3]), |
|
1111 | 1111 | reverse=True) |
|
1112 | 1112 | |
|
1113 | 1113 | for tag, ctx, isactive, isopen in branches: |
|
1114 | 1114 | if active and not isactive: |
|
1115 | 1115 | continue |
|
1116 | 1116 | if isactive: |
|
1117 | 1117 | label = 'branches.active' |
|
1118 | 1118 | notice = '' |
|
1119 | 1119 | elif not isopen: |
|
1120 | 1120 | if not closed: |
|
1121 | 1121 | continue |
|
1122 | 1122 | label = 'branches.closed' |
|
1123 | 1123 | notice = _(' (closed)') |
|
1124 | 1124 | else: |
|
1125 | 1125 | label = 'branches.inactive' |
|
1126 | 1126 | notice = _(' (inactive)') |
|
1127 | 1127 | current = (tag == repo.dirstate.branch()) |
|
1128 | 1128 | if current: |
|
1129 | 1129 | label = 'branches.current' |
|
1130 | 1130 | |
|
1131 | 1131 | fm.startitem() |
|
1132 | 1132 | fm.write('branch', '%s', tag, label=label) |
|
1133 | 1133 | rev = ctx.rev() |
|
1134 | 1134 | padsize = max(31 - len(str(rev)) - encoding.colwidth(tag), 0) |
|
1135 | 1135 | fmt = ' ' * padsize + ' %d:%s' |
|
1136 | 1136 | fm.condwrite(not ui.quiet, 'rev node', fmt, rev, hexfunc(ctx.node()), |
|
1137 | 1137 | label='log.changeset changeset.%s' % ctx.phasestr()) |
|
1138 | 1138 | fm.context(ctx=ctx) |
|
1139 | 1139 | fm.data(active=isactive, closed=not isopen, current=current) |
|
1140 | 1140 | if not ui.quiet: |
|
1141 | 1141 | fm.plain(notice) |
|
1142 | 1142 | fm.plain('\n') |
|
1143 | 1143 | fm.end() |
|
1144 | 1144 | |
|
1145 | 1145 | @command('bundle', |
|
1146 | 1146 | [('f', 'force', None, _('run even when the destination is unrelated')), |
|
1147 | 1147 | ('r', 'rev', [], _('a changeset intended to be added to the destination'), |
|
1148 | 1148 | _('REV')), |
|
1149 | 1149 | ('b', 'branch', [], _('a specific branch you would like to bundle'), |
|
1150 | 1150 | _('BRANCH')), |
|
1151 | 1151 | ('', 'base', [], |
|
1152 | 1152 | _('a base changeset assumed to be available at the destination'), |
|
1153 | 1153 | _('REV')), |
|
1154 | 1154 | ('a', 'all', None, _('bundle all changesets in the repository')), |
|
1155 | 1155 | ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')), |
|
1156 | 1156 | ] + remoteopts, |
|
1157 | 1157 | _('[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]')) |
|
1158 | 1158 | def bundle(ui, repo, fname, dest=None, **opts): |
|
1159 | 1159 | """create a bundle file |
|
1160 | 1160 | |
|
1161 | 1161 | Generate a bundle file containing data to be transferred to another |
|
1162 | 1162 | repository. |
|
1163 | 1163 | |
|
1164 | 1164 | To create a bundle containing all changesets, use -a/--all |
|
1165 | 1165 | (or --base null). Otherwise, hg assumes the destination will have |
|
1166 | 1166 | all the nodes you specify with --base parameters. If no --base |
|
1167 | 1167 | revisions are given, hg will assume the repository has all the nodes in |
|
1168 | 1168 | destination, or default-push/default if no destination is specified, |
|
1169 | 1169 | where destination is the repository you provide through the DEST option. |
|
1170 | 1170 | |
|
1171 | 1171 | You can change bundle format with the -t/--type option. See |
|
1172 | 1172 | :hg:`help bundlespec` for documentation on this format. By default, |
|
1173 | 1173 | the most appropriate format is used and compression defaults to |
|
1174 | 1174 | bzip2. |
|
1175 | 1175 | |
|
1176 | 1176 | The bundle file can then be transferred using conventional means |
|
1177 | 1177 | and applied to another repository with the unbundle or pull |
|
1178 | 1178 | command. This is useful when direct push and pull are not |
|
1179 | 1179 | available or when exporting an entire repository is undesirable. |
|
1180 | 1180 | |
|
1181 | 1181 | Applying bundles preserves all changeset contents including |
|
1182 | 1182 | permissions, copy/rename information, and revision history. |
|
1183 | 1183 | |
|
1184 | 1184 | Returns 0 on success, 1 if no changes found. |
|
1185 | 1185 | """ |
|
1186 | 1186 | opts = pycompat.byteskwargs(opts) |
|
1187 | 1187 | revs = None |
|
1188 | 1188 | if 'rev' in opts: |
|
1189 | 1189 | revstrings = opts['rev'] |
|
1190 | 1190 | revs = scmutil.revrange(repo, revstrings) |
|
1191 | 1191 | if revstrings and not revs: |
|
1192 | 1192 | raise error.Abort(_('no commits to bundle')) |
|
1193 | 1193 | |
|
1194 | 1194 | bundletype = opts.get('type', 'bzip2').lower() |
|
1195 | 1195 | try: |
|
1196 | 1196 | bcompression, cgversion, params = exchange.parsebundlespec( |
|
1197 | 1197 | repo, bundletype, strict=False) |
|
1198 | 1198 | except error.UnsupportedBundleSpecification as e: |
|
1199 | 1199 | raise error.Abort(str(e), |
|
1200 | 1200 | hint=_("see 'hg help bundlespec' for supported " |
|
1201 | 1201 | "values for --type")) |
|
1202 | 1202 | |
|
1203 | 1203 | # Packed bundles are a pseudo bundle format for now. |
|
1204 | 1204 | if cgversion == 's1': |
|
1205 | 1205 | raise error.Abort(_('packed bundles cannot be produced by "hg bundle"'), |
|
1206 | 1206 | hint=_("use 'hg debugcreatestreamclonebundle'")) |
|
1207 | 1207 | |
|
1208 | 1208 | if opts.get('all'): |
|
1209 | 1209 | if dest: |
|
1210 | 1210 | raise error.Abort(_("--all is incompatible with specifying " |
|
1211 | 1211 | "a destination")) |
|
1212 | 1212 | if opts.get('base'): |
|
1213 | 1213 | ui.warn(_("ignoring --base because --all was specified\n")) |
|
1214 | 1214 | base = ['null'] |
|
1215 | 1215 | else: |
|
1216 | 1216 | base = scmutil.revrange(repo, opts.get('base')) |
|
1217 | 1217 | if cgversion not in changegroup.supportedoutgoingversions(repo): |
|
1218 | 1218 | raise error.Abort(_("repository does not support bundle version %s") % |
|
1219 | 1219 | cgversion) |
|
1220 | 1220 | |
|
1221 | 1221 | if base: |
|
1222 | 1222 | if dest: |
|
1223 | 1223 | raise error.Abort(_("--base is incompatible with specifying " |
|
1224 | 1224 | "a destination")) |
|
1225 | 1225 | common = [repo.lookup(rev) for rev in base] |
|
1226 | 1226 | heads = [repo.lookup(r) for r in revs] if revs else None |
|
1227 | 1227 | outgoing = discovery.outgoing(repo, common, heads) |
|
1228 | 1228 | else: |
|
1229 | 1229 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
1230 | 1230 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
1231 | 1231 | other = hg.peer(repo, opts, dest) |
|
1232 | 1232 | revs, checkout = hg.addbranchrevs(repo, repo, branches, revs) |
|
1233 | 1233 | heads = revs and map(repo.lookup, revs) or revs |
|
1234 | 1234 | outgoing = discovery.findcommonoutgoing(repo, other, |
|
1235 | 1235 | onlyheads=heads, |
|
1236 | 1236 | force=opts.get('force'), |
|
1237 | 1237 | portable=True) |
|
1238 | 1238 | |
|
1239 | 1239 | if not outgoing.missing: |
|
1240 | 1240 | scmutil.nochangesfound(ui, repo, not base and outgoing.excluded) |
|
1241 | 1241 | return 1 |
|
1242 | 1242 | |
|
1243 | 1243 | if cgversion == '01': #bundle1 |
|
1244 | 1244 | if bcompression is None: |
|
1245 | 1245 | bcompression = 'UN' |
|
1246 | 1246 | bversion = 'HG10' + bcompression |
|
1247 | 1247 | bcompression = None |
|
1248 | 1248 | elif cgversion in ('02', '03'): |
|
1249 | 1249 | bversion = 'HG20' |
|
1250 | 1250 | else: |
|
1251 | 1251 | raise error.ProgrammingError( |
|
1252 | 1252 | 'bundle: unexpected changegroup version %s' % cgversion) |
|
1253 | 1253 | |
|
1254 | 1254 | # TODO compression options should be derived from bundlespec parsing. |
|
1255 | 1255 | # This is a temporary hack to allow adjusting bundle compression |
|
1256 | 1256 | # level without a) formalizing the bundlespec changes to declare it |
|
1257 | 1257 | # b) introducing a command flag. |
|
1258 | 1258 | compopts = {} |
|
1259 | 1259 | complevel = ui.configint('experimental', 'bundlecomplevel') |
|
1260 | 1260 | if complevel is not None: |
|
1261 | 1261 | compopts['level'] = complevel |
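# For example, a user could raise the compression effort through the
# experimental setting read above (the value here is illustrative):
#
#   [experimental]
#   bundlecomplevel = 9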
|
1262 | 1262 | |
|
1263 | 1263 | |
|
1264 | 1264 | contentopts = {'cg.version': cgversion} |
|
1265 | 1265 | if repo.ui.configbool('experimental', 'evolution.bundle-obsmarker'): |
|
1266 | 1266 | contentopts['obsolescence'] = True |
|
1267 | 1267 | if repo.ui.configbool('experimental', 'bundle-phases'): |
|
1268 | 1268 | contentopts['phases'] = True |
|
1269 | 1269 | bundle2.writenewbundle(ui, repo, 'bundle', fname, bversion, outgoing, |
|
1270 | 1270 | contentopts, compression=bcompression, |
|
1271 | 1271 | compopts=compopts) |
|
1272 | 1272 | |
|
1273 | 1273 | @command('cat', |
|
1274 | 1274 | [('o', 'output', '', |
|
1275 | 1275 | _('print output to file with formatted name'), _('FORMAT')), |
|
1276 | 1276 | ('r', 'rev', '', _('print the given revision'), _('REV')), |
|
1277 | 1277 | ('', 'decode', None, _('apply any matching decode filter')), |
|
1278 | 1278 | ] + walkopts + formatteropts, |
|
1279 | 1279 | _('[OPTION]... FILE...'), |
|
1280 | 1280 | inferrepo=True, cmdtype=readonly) |
|
1281 | 1281 | def cat(ui, repo, file1, *pats, **opts): |
|
1282 | 1282 | """output the current or given revision of files |
|
1283 | 1283 | |
|
1284 | 1284 | Print the specified files as they were at the given revision. If |
|
1285 | 1285 | no revision is given, the parent of the working directory is used. |
|
1286 | 1286 | |
|
1287 | 1287 | Output may be to a file, in which case the name of the file is |
|
1288 | 1288 | given using a format string. The formatting rules are as follows:
|
1289 | 1289 | |
|
1290 | 1290 | :``%%``: literal "%" character |
|
1291 | 1291 | :``%s``: basename of file being printed |
|
1292 | 1292 | :``%d``: dirname of file being printed, or '.' if in repository root |
|
1293 | 1293 | :``%p``: root-relative path name of file being printed |
|
1294 | 1294 | :``%H``: changeset hash (40 hexadecimal digits) |
|
1295 | 1295 | :``%R``: changeset revision number |
|
1296 | 1296 | :``%h``: short-form changeset hash (12 hexadecimal digits) |
|
1297 | 1297 | :``%r``: zero-padded changeset revision number |
|
1298 | 1298 | :``%b``: basename of the exporting repository |
|
1299 | 1299 | |
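.. container:: verbose

Examples:

- print the working directory version of a file::

hg cat foo.c

- write each listed file to an output file named after its short
changeset hash and basename (the format string and file names are
illustrative)::

hg cat -r . -o "%h-%s" foo.c bar.c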
|
1300 | 1300 | Returns 0 on success. |
|
1301 | 1301 | """ |
|
1302 | 1302 | opts = pycompat.byteskwargs(opts) |
|
1303 | 1303 | rev = opts.get('rev') |
|
1304 | 1304 | if rev: |
|
1305 | 1305 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
1306 | 1306 | ctx = scmutil.revsingle(repo, rev) |
|
1307 | 1307 | m = scmutil.match(ctx, (file1,) + pats, opts) |
|
1308 | 1308 | fntemplate = opts.pop('output', '') |
|
1309 | 1309 | if cmdutil.isstdiofilename(fntemplate): |
|
1310 | 1310 | fntemplate = '' |
|
1311 | 1311 | |
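# a filename template routes output to per-file destinations, so the
# formatter is a no-op there; otherwise format (and page) to stdout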
|
1312 | 1312 | if fntemplate: |
|
1313 | 1313 | fm = formatter.nullformatter(ui, 'cat') |
|
1314 | 1314 | else: |
|
1315 | 1315 | ui.pager('cat') |
|
1316 | 1316 | fm = ui.formatter('cat', opts) |
|
1317 | 1317 | with fm: |
|
1318 | 1318 | return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, '', |
|
1319 | 1319 | **pycompat.strkwargs(opts)) |
|
1320 | 1320 | |
|
1321 | 1321 | @command('^clone', |
|
1322 | 1322 | [('U', 'noupdate', None, _('the clone will include an empty working ' |
|
1323 | 1323 | 'directory (only a repository)')), |
|
1324 | 1324 | ('u', 'updaterev', '', _('revision, tag, or branch to check out'), |
|
1325 | 1325 | _('REV')), |
|
1326 | 1326 | ('r', 'rev', [], _('include the specified changeset'), _('REV')), |
|
1327 | 1327 | ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')), |
|
1328 | 1328 | ('', 'pull', None, _('use pull protocol to copy metadata')), |
|
1329 | 1329 | ('', 'uncompressed', None, |
|
1330 | 1330 | _('an alias to --stream (DEPRECATED)')), |
|
1331 | 1331 | ('', 'stream', None, |
|
1332 | 1332 | _('clone with minimal data processing')), |
|
1333 | 1333 | ] + remoteopts, |
|
1334 | 1334 | _('[OPTION]... SOURCE [DEST]'), |
|
1335 | 1335 | norepo=True) |
|
1336 | 1336 | def clone(ui, source, dest=None, **opts): |
|
1337 | 1337 | """make a copy of an existing repository |
|
1338 | 1338 | |
|
1339 | 1339 | Create a copy of an existing repository in a new directory. |
|
1340 | 1340 | |
|
1341 | 1341 | If no destination directory name is specified, it defaults to the |
|
1342 | 1342 | basename of the source. |
|
1343 | 1343 | |
|
1344 | 1344 | The location of the source is added to the new repository's |
|
1345 | 1345 | ``.hg/hgrc`` file, as the default to be used for future pulls. |
|
1346 | 1346 | |
|
1347 | 1347 | Only local paths and ``ssh://`` URLs are supported as |
|
1348 | 1348 | destinations. For ``ssh://`` destinations, no working directory or |
|
1349 | 1349 | ``.hg/hgrc`` will be created on the remote side. |
|
1350 | 1350 | |
|
1351 | 1351 | If the source repository has a bookmark called '@' set, that |
|
1352 | 1352 | revision will be checked out in the new repository by default. |
|
1353 | 1353 | |
|
1354 | 1354 | To check out a particular version, use -u/--update, or |
|
1355 | 1355 | -U/--noupdate to create a clone with no working directory. |
|
1356 | 1356 | |
|
1357 | 1357 | To pull only a subset of changesets, specify one or more revision
|
1358 | 1358 | identifiers with -r/--rev or branches with -b/--branch. The |
|
1359 | 1359 | resulting clone will contain only the specified changesets and |
|
1360 | 1360 | their ancestors. These options (or 'clone src#rev dest') imply |
|
1361 | 1361 | --pull, even for local source repositories. |
|
1362 | 1362 | |
|
1363 | 1363 | In normal clone mode, the remote normalizes repository data into a common |
|
1364 | 1364 | exchange format and the receiving end translates this data into its local |
|
1365 | 1365 | storage format. --stream activates a different clone mode that essentially |
|
1366 | 1366 | copies repository files from the remote with minimal data processing. This |
|
1367 | 1367 | significantly reduces the CPU cost of a clone both remotely and locally. |
|
1368 | 1368 | However, it often increases the transferred data size by 30-40%. This can |
|
1369 | 1369 | result in substantially faster clones where I/O throughput is plentiful, |
|
1370 | 1370 | especially for larger repositories. A side-effect of --stream clones is |
|
1371 | 1371 | that storage settings and requirements on the remote are applied locally: |
|
1372 | 1372 | a modern client may inherit legacy or inefficient storage used by the |
|
1373 | 1373 | remote or a legacy Mercurial client may not be able to clone from a |
|
1374 | 1374 | modern Mercurial remote. |
|
1375 | 1375 | |
|
1376 | 1376 | .. note:: |
|
1377 | 1377 | |
|
1378 | 1378 | Specifying a tag will include the tagged changeset but not the |
|
1379 | 1379 | changeset containing the tag. |
|
1380 | 1380 | |
|
1381 | 1381 | .. container:: verbose |
|
1382 | 1382 | |
|
1383 | 1383 | For efficiency, hardlinks are used for cloning whenever the |
|
1384 | 1384 | source and destination are on the same filesystem (note this |
|
1385 | 1385 | applies only to the repository data, not to the working |
|
1386 | 1386 | directory). Some filesystems, such as AFS, implement hardlinking |
|
1387 | 1387 | incorrectly, but do not report errors. In these cases, use the |
|
1388 | 1388 | --pull option to avoid hardlinking. |
|
1389 | 1389 | |
|
1390 | 1390 | Mercurial will update the working directory to the first applicable |
|
1391 | 1391 | revision from this list: |
|
1392 | 1392 | |
|
1393 | 1393 | a) null if -U or the source repository has no changesets |
|
1394 | 1394 | b) if -u . and the source repository is local, the first parent of |
|
1395 | 1395 | the source repository's working directory |
|
1396 | 1396 | c) the changeset specified with -u (if a branch name, this means the |
|
1397 | 1397 | latest head of that branch) |
|
1398 | 1398 | d) the changeset specified with -r |
|
1399 | 1399 | e) the tipmost head specified with -b |
|
1400 | 1400 | f) the tipmost head specified with the url#branch source syntax |
|
1401 | 1401 | g) the revision marked with the '@' bookmark, if present |
|
1402 | 1402 | h) the tipmost head of the default branch |
|
1403 | 1403 | i) tip |
|
1404 | 1404 | |
|
1405 | 1405 | When cloning from servers that support it, Mercurial may fetch |
|
1406 | 1406 | pre-generated data from a server-advertised URL. When this is done, |
|
1407 | 1407 | hooks operating on incoming changesets and changegroups may fire twice, |
|
1408 | 1408 | once for the bundle fetched from the URL and another for any additional |
|
1409 | 1409 | data not fetched from this URL. In addition, if an error occurs, the |
|
1410 | 1410 | repository may be rolled back to a partial clone. This behavior may |
|
1411 | 1411 | change in future releases. See :hg:`help -e clonebundles` for more. |
|
1412 | 1412 | |
|
1413 | 1413 | Examples: |
|
1414 | 1414 | |
|
1415 | 1415 | - clone a remote repository to a new directory named hg/:: |
|
1416 | 1416 | |
|
1417 | 1417 | hg clone https://www.mercurial-scm.org/repo/hg/ |
|
1418 | 1418 | |
|
1419 | 1419 | - create a lightweight local clone:: |
|
1420 | 1420 | |
|
1421 | 1421 | hg clone project/ project-feature/ |
|
1422 | 1422 | |
|
1423 | 1423 | - clone from an absolute path on an ssh server (note double-slash):: |
|
1424 | 1424 | |
|
1425 | 1425 | hg clone ssh://user@server//home/projects/alpha/ |
|
1426 | 1426 | |
|
1427 | 1427 | - do a streaming clone while checking out a specified version:: |
|
1428 | 1428 | |
|
1429 | 1429 | hg clone --stream http://server/repo -u 1.5 |
|
1430 | 1430 | |
|
1431 | 1431 | - create a repository without changesets after a particular revision:: |
|
1432 | 1432 | |
|
1433 | 1433 | hg clone -r 04e544 experimental/ good/ |
|
1434 | 1434 | |
|
1435 | 1435 | - clone (and track) a particular named branch:: |
|
1436 | 1436 | |
|
1437 | 1437 | hg clone https://www.mercurial-scm.org/repo/hg/#stable |
|
1438 | 1438 | |
|
1439 | 1439 | See :hg:`help urls` for details on specifying URLs. |
|
1440 | 1440 | |
|
1441 | 1441 | Returns 0 on success. |
|
1442 | 1442 | """ |
|
1443 | 1443 | opts = pycompat.byteskwargs(opts) |
|
1444 | 1444 | if opts.get('noupdate') and opts.get('updaterev'): |
|
1445 | 1445 | raise error.Abort(_("cannot specify both --noupdate and --updaterev")) |
|
1446 | 1446 | |
|
1447 | 1447 | r = hg.clone(ui, opts, source, dest, |
|
1448 | 1448 | pull=opts.get('pull'), |
|
1449 | 1449 | stream=opts.get('stream') or opts.get('uncompressed'), |
|
1450 | 1450 | rev=opts.get('rev'), |
|
1451 | 1451 | update=opts.get('updaterev') or not opts.get('noupdate'), |
|
1452 | 1452 | branch=opts.get('branch'), |
|
1453 | 1453 | shareopts=opts.get('shareopts')) |
|
1454 | 1454 | |
|
1455 | 1455 | return r is None |
|
1456 | 1456 | |
|
1457 | 1457 | @command('^commit|ci', |
|
1458 | 1458 | [('A', 'addremove', None, |
|
1459 | 1459 | _('mark new/missing files as added/removed before committing')), |
|
1460 | 1460 | ('', 'close-branch', None, |
|
1461 | 1461 | _('mark a branch head as closed')), |
|
1462 | 1462 | ('', 'amend', None, _('amend the parent of the working directory')), |
|
1463 | 1463 | ('s', 'secret', None, _('use the secret phase for committing')), |
|
1464 | 1464 | ('e', 'edit', None, _('invoke editor on commit messages')), |
|
1465 | 1465 | ('i', 'interactive', None, _('use interactive mode')), |
|
1466 | 1466 | ] + walkopts + commitopts + commitopts2 + subrepoopts, |
|
1467 | 1467 | _('[OPTION]... [FILE]...'), |
|
1468 | 1468 | inferrepo=True) |
|
1469 | 1469 | def commit(ui, repo, *pats, **opts): |
|
1470 | 1470 | """commit the specified files or all outstanding changes |
|
1471 | 1471 | |
|
1472 | 1472 | Commit changes to the given files into the repository. Unlike a |
|
1473 | 1473 | centralized SCM, this operation is a local operation. See |
|
1474 | 1474 | :hg:`push` for a way to actively distribute your changes. |
|
1475 | 1475 | |
|
1476 | 1476 | If a list of files is omitted, all changes reported by :hg:`status` |
|
1477 | 1477 | will be committed. |
|
1478 | 1478 | |
|
1479 | 1479 | If you are committing the result of a merge, do not provide any |
|
1480 | 1480 | filenames or -I/-X filters. |
|
1481 | 1481 | |
|
1482 | 1482 | If no commit message is specified, Mercurial starts your |
|
1483 | 1483 | configured editor where you can enter a message. In case your |
|
1484 | 1484 | commit fails, you will find a backup of your message in |
|
1485 | 1485 | ``.hg/last-message.txt``. |
|
1486 | 1486 | |
|
1487 | 1487 | The --close-branch flag can be used to mark the current branch |
|
1488 | 1488 | head closed. When all heads of a branch are closed, the branch |
|
1489 | 1489 | will be considered closed and no longer listed. |
|
1490 | 1490 | |
|
1491 | 1491 | The --amend flag can be used to amend the parent of the |
|
1492 | 1492 | working directory with a new commit that contains the changes |
|
1493 | 1493 | in the parent in addition to those currently reported by :hg:`status`, |
|
1494 | 1494 | if there are any. The old commit is stored in a backup bundle in |
|
1495 | 1495 | ``.hg/strip-backup`` (see :hg:`help bundle` and :hg:`help unbundle` |
|
1496 | 1496 | on how to restore it). |
|
1497 | 1497 | |
|
1498 | 1498 | Message, user and date are taken from the amended commit unless |
|
1499 | 1499 | specified. When a message isn't specified on the command line, |
|
1500 | 1500 | the editor will open with the message of the amended commit. |
|
1501 | 1501 | |
|
1502 | 1502 | It is not possible to amend public changesets (see :hg:`help phases`) |
|
1503 | 1503 | or changesets that have children. |
|
1504 | 1504 | |
|
1505 | 1505 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
1506 | 1506 | |
|
1507 | 1507 | Returns 0 on success, 1 if nothing changed. |
|
1508 | 1508 | |
|
1509 | 1509 | .. container:: verbose |
|
1510 | 1510 | |
|
1511 | 1511 | Examples: |
|
1512 | 1512 | |
|
1513 | 1513 | - commit all files ending in .py:: |
|
1514 | 1514 | |
|
1515 | 1515 | hg commit --include "set:**.py" |
|
1516 | 1516 | |
|
1517 | 1517 | - commit all non-binary files:: |
|
1518 | 1518 | |
|
1519 | 1519 | hg commit --exclude "set:binary()" |
|
1520 | 1520 | |
|
1521 | 1521 | - amend the current commit and set the date to now:: |
|
1522 | 1522 | |
|
1523 | 1523 | hg commit --amend --date now |
|
1524 | 1524 | """ |
|
1525 | 1525 | wlock = lock = None |
|
1526 | 1526 | try: |
|
1527 | 1527 | wlock = repo.wlock() |
|
1528 | 1528 | lock = repo.lock() |
|
1529 | 1529 | return _docommit(ui, repo, *pats, **opts) |
|
1530 | 1530 | finally: |
|
1531 | 1531 | release(lock, wlock) |
|
1532 | 1532 | |
|
1533 | 1533 | def _docommit(ui, repo, *pats, **opts): |
|
1534 | 1534 | if opts.get(r'interactive'): |
|
1535 | 1535 | opts.pop(r'interactive') |
|
1536 | 1536 | ret = cmdutil.dorecord(ui, repo, commit, None, False, |
|
1537 | 1537 | cmdutil.recordfilter, *pats, |
|
1538 | 1538 | **opts) |
|
1539 | 1539 | # ret can be 0 (no changes to record) or the value returned by |
|
1540 | 1540 | # commit(): 1 if nothing changed, or None on success.
|
1541 | 1541 | return 1 if ret == 0 else ret |
|
1542 | 1542 | |
|
1543 | 1543 | opts = pycompat.byteskwargs(opts) |
|
1544 | 1544 | if opts.get('subrepos'): |
|
1545 | 1545 | if opts.get('amend'): |
|
1546 | 1546 | raise error.Abort(_('cannot amend with --subrepos')) |
|
1547 | 1547 | # Let --subrepos on the command line override config setting. |
|
1548 | 1548 | ui.setconfig('ui', 'commitsubrepos', True, 'commit') |
|
1549 | 1549 | |
|
1550 | 1550 | cmdutil.checkunfinished(repo, commit=True) |
|
1551 | 1551 | |
|
1552 | 1552 | branch = repo[None].branch() |
|
1553 | 1553 | bheads = repo.branchheads(branch) |
|
1554 | 1554 | |
|
1555 | 1555 | extra = {} |
|
1556 | 1556 | if opts.get('close_branch'): |
|
1557 | 1557 | extra['close'] = '1' |
|
1558 | 1558 | |
|
1559 | 1559 | if not bheads: |
|
1560 | 1560 | raise error.Abort(_('can only close branch heads')) |
|
1561 | 1561 | elif opts.get('amend'): |
|
1562 | 1562 | if repo[None].parents()[0].p1().branch() != branch and \ |
|
1563 | 1563 | repo[None].parents()[0].p2().branch() != branch: |
|
1564 | 1564 | raise error.Abort(_('can only close branch heads')) |
|
1565 | 1565 | |
|
1566 | 1566 | if opts.get('amend'): |
|
1567 | 1567 | if ui.configbool('ui', 'commitsubrepos'): |
|
1568 | 1568 | raise error.Abort(_('cannot amend with ui.commitsubrepos enabled')) |
|
1569 | 1569 | |
|
1570 | 1570 | old = repo['.'] |
|
1571 | 1571 | rewriteutil.precheck(repo, [old.rev()], 'amend') |
|
1572 | 1572 | |
|
1573 | 1573 | # Currently histedit gets confused if an amend happens while histedit |
|
1574 | 1574 | # is in progress. Since we have a checkunfinished command, we are |
|
1575 | 1575 | # temporarily honoring it. |
|
1576 | 1576 | # |
|
1577 | 1577 | # Note: eventually this guard will be removed. Please do not expect |
|
1578 | 1578 | # this behavior to remain. |
|
1579 | 1579 | if not obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
1580 | 1580 | cmdutil.checkunfinished(repo) |
|
1581 | 1581 | |
|
1582 | 1582 | node = cmdutil.amend(ui, repo, old, extra, pats, opts) |
|
1583 | 1583 | if node == old.node(): |
|
1584 | 1584 | ui.status(_("nothing changed\n")) |
|
1585 | 1585 | return 1 |
|
1586 | 1586 | else: |
|
1587 | 1587 | def commitfunc(ui, repo, message, match, opts): |
|
1588 | 1588 | overrides = {} |
|
1589 | 1589 | if opts.get('secret'): |
|
1590 | 1590 | overrides[('phases', 'new-commit')] = 'secret' |
|
1591 | 1591 | |
|
1592 | 1592 | baseui = repo.baseui |
|
1593 | 1593 | with baseui.configoverride(overrides, 'commit'): |
|
1594 | 1594 | with ui.configoverride(overrides, 'commit'): |
|
1595 | 1595 | editform = cmdutil.mergeeditform(repo[None], |
|
1596 | 1596 | 'commit.normal') |
|
1597 | 1597 | editor = cmdutil.getcommiteditor( |
|
1598 | 1598 | editform=editform, **pycompat.strkwargs(opts)) |
|
1599 | 1599 | return repo.commit(message, |
|
1600 | 1600 | opts.get('user'), |
|
1601 | 1601 | opts.get('date'), |
|
1602 | 1602 | match, |
|
1603 | 1603 | editor=editor, |
|
1604 | 1604 | extra=extra) |
|
1605 | 1605 | |
|
1606 | 1606 | node = cmdutil.commit(ui, repo, commitfunc, pats, opts) |
|
1607 | 1607 | |
|
1608 | 1608 | if not node: |
|
1609 | 1609 | stat = cmdutil.postcommitstatus(repo, pats, opts) |
|
1610 | 1610 | if stat[3]: |
|
1611 | 1611 | ui.status(_("nothing changed (%d missing files, see " |
|
1612 | 1612 | "'hg status')\n") % len(stat[3])) |
|
1613 | 1613 | else: |
|
1614 | 1614 | ui.status(_("nothing changed\n")) |
|
1615 | 1615 | return 1 |
|
1616 | 1616 | |
|
1617 | 1617 | cmdutil.commitstatus(repo, node, branch, bheads, opts) |
|
1618 | 1618 | |
|
1619 | 1619 | @command('config|showconfig|debugconfig', |
|
1620 | 1620 | [('u', 'untrusted', None, _('show untrusted configuration options')), |
|
1621 | 1621 | ('e', 'edit', None, _('edit user config')), |
|
1622 | 1622 | ('l', 'local', None, _('edit repository config')), |
|
1623 | 1623 | ('g', 'global', None, _('edit global config'))] + formatteropts, |
|
1624 | 1624 | _('[-u] [NAME]...'), |
|
1625 | 1625 | optionalrepo=True, cmdtype=readonly) |
|
1626 | 1626 | def config(ui, repo, *values, **opts): |
|
1627 | 1627 | """show combined config settings from all hgrc files |
|
1628 | 1628 | |
|
1629 | 1629 | With no arguments, print names and values of all config items. |
|
1630 | 1630 | |
|
1631 | 1631 | With one argument of the form section.name, print just the value |
|
1632 | 1632 | of that config item. |
|
1633 | 1633 | |
|
1634 | 1634 | With multiple arguments, print names and values of all config |
|
1635 | 1635 | items with matching section names or section.names. |
|
1636 | 1636 | |
|
1637 | 1637 | With --edit, start an editor on the user-level config file. With |
|
1638 | 1638 | --global, edit the system-wide config file. With --local, edit the |
|
1639 | 1639 | repository-level config file. |
|
1640 | 1640 | |
|
1641 | 1641 | With --debug, the source (filename and line number) is printed |
|
1642 | 1642 | for each config item. |
|
1643 | 1643 | |
|
1644 | 1644 | See :hg:`help config` for more information about config files. |
|
1645 | 1645 | |
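.. container:: verbose

Examples:

- show the value of a single item (the name is illustrative)::

hg config ui.username

- show all items in a section::

hg config extensions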
|
1646 | 1646 | Returns 0 on success, 1 if NAME does not exist. |
|
1647 | 1647 | |
|
1648 | 1648 | """ |
|
1649 | 1649 | |
|
1650 | 1650 | opts = pycompat.byteskwargs(opts) |
|
1651 | 1651 | if opts.get('edit') or opts.get('local') or opts.get('global'): |
|
1652 | 1652 | if opts.get('local') and opts.get('global'): |
|
1653 | 1653 | raise error.Abort(_("can't use --local and --global together")) |
|
1654 | 1654 | |
|
1655 | 1655 | if opts.get('local'): |
|
1656 | 1656 | if not repo: |
|
1657 | 1657 | raise error.Abort(_("can't use --local outside a repository")) |
|
1658 | 1658 | paths = [repo.vfs.join('hgrc')] |
|
1659 | 1659 | elif opts.get('global'): |
|
1660 | 1660 | paths = rcutil.systemrcpath() |
|
1661 | 1661 | else: |
|
1662 | 1662 | paths = rcutil.userrcpath() |
|
1663 | 1663 | |
|
1664 | 1664 | for f in paths: |
|
1665 | 1665 | if os.path.exists(f): |
|
1666 | 1666 | break |
|
1667 | 1667 | else: |
|
1668 | 1668 | if opts.get('global'): |
|
1669 | 1669 | samplehgrc = uimod.samplehgrcs['global'] |
|
1670 | 1670 | elif opts.get('local'): |
|
1671 | 1671 | samplehgrc = uimod.samplehgrcs['local'] |
|
1672 | 1672 | else: |
|
1673 | 1673 | samplehgrc = uimod.samplehgrcs['user'] |
|
1674 | 1674 | |
|
1675 | 1675 | f = paths[0] |
|
1676 | 1676 | fp = open(f, "wb") |
|
1677 | 1677 | fp.write(util.tonativeeol(samplehgrc)) |
|
1678 | 1678 | fp.close() |
|
1679 | 1679 | |
|
1680 | 1680 | editor = ui.geteditor() |
|
1681 | 1681 | ui.system("%s \"%s\"" % (editor, f), |
|
1682 | 1682 | onerr=error.Abort, errprefix=_("edit failed"), |
|
1683 | 1683 | blockedtag='config_edit') |
|
1684 | 1684 | return |
|
1685 | 1685 | ui.pager('config') |
|
1686 | 1686 | fm = ui.formatter('config', opts) |
|
1687 | 1687 | for t, f in rcutil.rccomponents(): |
|
1688 | 1688 | if t == 'path': |
|
1689 | 1689 | ui.debug('read config from: %s\n' % f) |
|
1690 | 1690 | elif t == 'items': |
|
1691 | 1691 | for section, name, value, source in f: |
|
1692 | 1692 | ui.debug('set config by: %s\n' % source) |
|
1693 | 1693 | else: |
|
1694 | 1694 | raise error.ProgrammingError('unknown rctype: %s' % t) |
|
1695 | 1695 | untrusted = bool(opts.get('untrusted')) |
|
1696 | 1696 | |
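# arguments without a '.' select whole sections; arguments with a '.'
# select individual section.name entries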
|
1697 | 1697 | selsections = selentries = [] |
|
1698 | 1698 | if values: |
|
1699 | 1699 | selsections = [v for v in values if '.' not in v] |
|
1700 | 1700 | selentries = [v for v in values if '.' in v] |
|
1701 | 1701 | uniquesel = (len(selentries) == 1 and not selsections) |
|
1702 | 1702 | selsections = set(selsections) |
|
1703 | 1703 | selentries = set(selentries) |
|
1704 | 1704 | |
|
1705 | 1705 | matched = False |
|
1706 | 1706 | for section, name, value in ui.walkconfig(untrusted=untrusted): |
|
1707 | 1707 | source = ui.configsource(section, name, untrusted) |
|
1708 | 1708 | value = pycompat.bytestr(value) |
|
1709 | 1709 | if fm.isplain(): |
|
1710 | 1710 | source = source or 'none' |
|
1711 | 1711 | value = value.replace('\n', '\\n') |
|
1712 | 1712 | entryname = section + '.' + name |
|
1713 | 1713 | if values and not (section in selsections or entryname in selentries): |
|
1714 | 1714 | continue |
|
1715 | 1715 | fm.startitem() |
|
1716 | 1716 | fm.condwrite(ui.debugflag, 'source', '%s: ', source) |
|
1717 | 1717 | if uniquesel: |
|
1718 | 1718 | fm.data(name=entryname) |
|
1719 | 1719 | fm.write('value', '%s\n', value) |
|
1720 | 1720 | else: |
|
1721 | 1721 | fm.write('name value', '%s=%s\n', entryname, value) |
|
1722 | 1722 | matched = True |
|
1723 | 1723 | fm.end() |
|
1724 | 1724 | if matched: |
|
1725 | 1725 | return 0 |
|
1726 | 1726 | return 1 |
|
1727 | 1727 | |
|
1728 | 1728 | @command('copy|cp', |
|
1729 | 1729 | [('A', 'after', None, _('record a copy that has already occurred')), |
|
1730 | 1730 | ('f', 'force', None, _('forcibly copy over an existing managed file')), |
|
1731 | 1731 | ] + walkopts + dryrunopts, |
|
1732 | 1732 | _('[OPTION]... [SOURCE]... DEST')) |
|
1733 | 1733 | def copy(ui, repo, *pats, **opts): |
|
1734 | 1734 | """mark files as copied for the next commit |
|
1735 | 1735 | |
|
1736 | 1736 | Mark dest as having copies of source files. If dest is a |
|
1737 | 1737 | directory, copies are put in that directory. If dest is a file, |
|
1738 | 1738 | the source must be a single file. |
|
1739 | 1739 | |
|
1740 | 1740 | By default, this command copies the contents of files as they |
|
1741 | 1741 | exist in the working directory. If invoked with -A/--after, the |
|
1742 | 1742 | operation is recorded, but no copying is performed. |
|
1743 | 1743 | |
|
1744 | 1744 | This command takes effect with the next commit. To undo a copy |
|
1745 | 1745 | before that, see :hg:`revert`. |
|
1746 | 1746 | |
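.. container:: verbose

Examples:

- copy a single file (file names are illustrative)::

hg copy foo.c baz.c

- record a copy that has already been made on disk::

hg copy --after foo.c baz.c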
|
1747 | 1747 | Returns 0 on success, 1 if errors are encountered. |
|
1748 | 1748 | """ |
|
1749 | 1749 | opts = pycompat.byteskwargs(opts) |
|
1750 | 1750 | with repo.wlock(False): |
|
1751 | 1751 | return cmdutil.copy(ui, repo, pats, opts) |
|
1752 | 1752 | |
|
1753 | 1753 | @command('debugcommands', [], _('[COMMAND]'), norepo=True) |
|
1754 | 1754 | def debugcommands(ui, cmd='', *args): |
|
1755 | 1755 | """list all available commands and options""" |
|
1756 | 1756 | for cmd, vals in sorted(table.iteritems()): |
|
1757 | 1757 | cmd = cmd.split('|')[0].strip('^') |
|
1758 | 1758 | opts = ', '.join([i[1] for i in vals[1]]) |
|
1759 | 1759 | ui.write('%s: %s\n' % (cmd, opts)) |
|
1760 | 1760 | |
|
1761 | 1761 | @command('debugcomplete', |
|
1762 | 1762 | [('o', 'options', None, _('show the command options'))], |
|
1763 | 1763 | _('[-o] CMD'), |
|
1764 | 1764 | norepo=True) |
|
1765 | 1765 | def debugcomplete(ui, cmd='', **opts): |
|
1766 | 1766 | """returns the completion list associated with the given command""" |
|
1767 | 1767 | |
|
1768 | 1768 | if opts.get(r'options'): |
|
1769 | 1769 | options = [] |
|
1770 | 1770 | otables = [globalopts] |
|
1771 | 1771 | if cmd: |
|
1772 | 1772 | aliases, entry = cmdutil.findcmd(cmd, table, False) |
|
1773 | 1773 | otables.append(entry[1]) |
|
1774 | 1774 | for t in otables: |
|
1775 | 1775 | for o in t: |
|
1776 | 1776 | if "(DEPRECATED)" in o[3]: |
|
1777 | 1777 | continue |
|
1778 | 1778 | if o[0]: |
|
1779 | 1779 | options.append('-%s' % o[0]) |
|
1780 | 1780 | options.append('--%s' % o[1]) |
|
1781 | 1781 | ui.write("%s\n" % "\n".join(options)) |
|
1782 | 1782 | return |
|
1783 | 1783 | |
|
1784 | 1784 | cmdlist, unused_allcmds = cmdutil.findpossible(cmd, table) |
|
1785 | 1785 | if ui.verbose: |
|
1786 | 1786 | cmdlist = [' '.join(c[0]) for c in cmdlist.values()] |
|
1787 | 1787 | ui.write("%s\n" % "\n".join(sorted(cmdlist))) |
|
1788 | 1788 | |
|
1789 | 1789 | @command('^diff', |
|
1790 | 1790 | [('r', 'rev', [], _('revision'), _('REV')), |
|
1791 | 1791 | ('c', 'change', '', _('change made by revision'), _('REV')) |
|
1792 | 1792 | ] + diffopts + diffopts2 + walkopts + subrepoopts, |
|
1793 | 1793 | _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'), |
|
1794 | 1794 | inferrepo=True, cmdtype=readonly) |
|
1795 | 1795 | def diff(ui, repo, *pats, **opts): |
|
1796 | 1796 | """diff repository (or selected files) |
|
1797 | 1797 | |
|
1798 | 1798 | Show differences between revisions for the specified files. |
|
1799 | 1799 | |
|
1800 | 1800 | Differences between files are shown using the unified diff format. |
|
1801 | 1801 | |
|
1802 | 1802 | .. note:: |
|
1803 | 1803 | |
|
1804 | 1804 | :hg:`diff` may generate unexpected results for merges, as it will |
|
1805 | 1805 | default to comparing against the working directory's first |
|
1806 | 1806 | parent changeset if no revisions are specified. |
|
1807 | 1807 | |
|
1808 | 1808 | When two revision arguments are given, then changes are shown |
|
1809 | 1809 | between those revisions. If only one revision is specified then |
|
1810 | 1810 | that revision is compared to the working directory, and, when no |
|
1811 | 1811 | revisions are specified, the working directory files are compared |
|
1812 | 1812 | to its first parent. |
|
1813 | 1813 | |
|
1814 | 1814 | Alternatively you can specify -c/--change with a revision to see |
|
1815 | 1815 | the changes in that changeset relative to its first parent. |
|
1816 | 1816 | |
|
1817 | 1817 | Without the -a/--text option, diff will avoid generating diffs of |
|
1818 | 1818 | files it detects as binary. With -a, diff will generate a diff |
|
1819 | 1819 | anyway, probably with undesirable results. |
|
1820 | 1820 | |
|
1821 | 1821 | Use the -g/--git option to generate diffs in the git extended diff |
|
1822 | 1822 | format. For more information, read :hg:`help diffs`. |
|
1823 | 1823 | |
|
1824 | 1824 | .. container:: verbose |
|
1825 | 1825 | |
|
1826 | 1826 | Examples: |
|
1827 | 1827 | |
|
1828 | 1828 | - compare a file in the current working directory to its parent:: |
|
1829 | 1829 | |
|
1830 | 1830 | hg diff foo.c |
|
1831 | 1831 | |
|
1832 | 1832 | - compare two historical versions of a directory, with rename info:: |
|
1833 | 1833 | |
|
1834 | 1834 | hg diff --git -r 1.0:1.2 lib/ |
|
1835 | 1835 | |
|
1836 | 1836 | - get change stats relative to the last change on some date:: |
|
1837 | 1837 | |
|
1838 | 1838 | hg diff --stat -r "date('may 2')" |
|
1839 | 1839 | |
|
1840 | 1840 | - diff all newly-added files that contain a keyword:: |
|
1841 | 1841 | |
|
1842 | 1842 | hg diff "set:added() and grep(GNU)" |
|
1843 | 1843 | |
|
1844 | 1844 | - compare a revision and its parents:: |
|
1845 | 1845 | |
|
1846 | 1846 | hg diff -c 9353 # compare against first parent |
|
1847 | 1847 | hg diff -r 9353^:9353 # same using revset syntax |
|
1848 | 1848 | hg diff -r 9353^2:9353 # compare against the second parent |
|
1849 | 1849 | |
|
1850 | 1850 | Returns 0 on success. |
|
1851 | 1851 | """ |
|
1852 | 1852 | |
|
1853 | 1853 | opts = pycompat.byteskwargs(opts) |
|
1854 | 1854 | revs = opts.get('rev') |
|
1855 | 1855 | change = opts.get('change') |
|
1856 | 1856 | stat = opts.get('stat') |
|
1857 | 1857 | reverse = opts.get('reverse') |
|
1858 | 1858 | |
|
1859 | 1859 | if revs and change: |
|
1860 | 1860 | msg = _('cannot specify --rev and --change at the same time') |
|
1861 | 1861 | raise error.Abort(msg) |
|
1862 | 1862 | elif change: |
|
1863 | 1863 | repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn') |
|
1864 | 1864 | node2 = scmutil.revsingle(repo, change, None).node() |
|
1865 | 1865 | node1 = repo[node2].p1().node() |
|
1866 | 1866 | else: |
|
1867 | 1867 | repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn') |
|
1868 | 1868 | node1, node2 = scmutil.revpair(repo, revs) |
|
1869 | 1869 | |
|
1870 | 1870 | if reverse: |
|
1871 | 1871 | node1, node2 = node2, node1 |
|
1872 | 1872 | |
|
1873 | 1873 | diffopts = patch.diffallopts(ui, opts) |
|
1874 | 1874 | m = scmutil.match(repo[node2], pats, opts) |
|
1875 | 1875 | ui.pager('diff') |
|
1876 | 1876 | logcmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat, |
|
1877 | 1877 | listsubrepos=opts.get('subrepos'), |
|
1878 | 1878 | root=opts.get('root')) |
|
1879 | 1879 | |
|
1880 | 1880 | @command('^export', |
|
1881 | 1881 | [('o', 'output', '', |
|
1882 | 1882 | _('print output to file with formatted name'), _('FORMAT')), |
|
1883 | 1883 | ('', 'switch-parent', None, _('diff against the second parent')), |
|
1884 | 1884 | ('r', 'rev', [], _('revisions to export'), _('REV')), |
|
1885 | 1885 | ] + diffopts, |
|
1886 | 1886 | _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'), cmdtype=readonly) |
|
1887 | 1887 | def export(ui, repo, *changesets, **opts): |
|
1888 | 1888 | """dump the header and diffs for one or more changesets |
|
1889 | 1889 | |
|
1890 | 1890 | Print the changeset header and diffs for one or more revisions. |
|
1891 | 1891 | If no revision is given, the parent of the working directory is used. |
|
1892 | 1892 | |
|
1893 | 1893 | The information shown in the changeset header is: author, date, |
|
1894 | 1894 | branch name (if non-default), changeset hash, parent(s) and commit |
|
1895 | 1895 | comment. |
|
1896 | 1896 | |
|
1897 | 1897 | .. note:: |
|
1898 | 1898 | |
|
1899 | 1899 | :hg:`export` may generate unexpected diff output for merge |
|
1900 | 1900 | changesets, as it will compare the merge changeset against its |
|
1901 | 1901 | first parent only. |
|
1902 | 1902 | |
|
1903 | 1903 | Output may be to a file, in which case the name of the file is |
|
1904 | 1904 | given using a format string. The formatting rules are as follows: |
|
1905 | 1905 | |
|
1906 | 1906 | :``%%``: literal "%" character |
|
1907 | 1907 | :``%H``: changeset hash (40 hexadecimal digits) |
|
1908 | 1908 | :``%N``: number of patches being generated |
|
1909 | 1909 | :``%R``: changeset revision number |
|
1910 | 1910 | :``%b``: basename of the exporting repository |
|
1911 | 1911 | :``%h``: short-form changeset hash (12 hexadecimal digits) |
|
1912 | 1912 | :``%m``: first line of the commit message (only alphanumeric characters) |
|
1913 | 1913 | :``%n``: zero-padded sequence number, starting at 1 |
|
1914 | 1914 | :``%r``: zero-padded changeset revision number |
|
1915 | 1915 | |
|
1916 | 1916 | Without the -a/--text option, export will avoid generating diffs |
|
1917 | 1917 | of files it detects as binary. With -a, export will generate a |
|
1918 | 1918 | diff anyway, probably with undesirable results. |
|
1919 | 1919 | |
|
1920 | 1920 | Use the -g/--git option to generate diffs in the git extended diff |
|
1921 | 1921 | format. See :hg:`help diffs` for more information. |
|
1922 | 1922 | |
|
1923 | 1923 | With the --switch-parent option, the diff will be against the |
|
1924 | 1924 | second parent. It can be useful to review a merge. |
|
1925 | 1925 | |
|
1926 | 1926 | .. container:: verbose |
|
1927 | 1927 | |
|
1928 | 1928 | Examples: |
|
1929 | 1929 | |
|
1930 | 1930 | - use export and import to transplant a bugfix to the current |
|
1931 | 1931 | branch:: |
|
1932 | 1932 | |
|
1933 | 1933 | hg export -r 9353 | hg import - |
|
1934 | 1934 | |
|
1935 | 1935 | - export all the changesets between two revisions to a file with |
|
1936 | 1936 | rename information:: |
|
1937 | 1937 | |
|
1938 | 1938 | hg export --git -r 123:150 > changes.txt |
|
1939 | 1939 | |
|
1940 | 1940 | - split outgoing changes into a series of patches with |
|
1941 | 1941 | descriptive names:: |
|
1942 | 1942 | |
|
1943 | 1943 | hg export -r "outgoing()" -o "%n-%m.patch" |
|
1944 | 1944 | |
|
1945 | 1945 | Returns 0 on success. |
|
1946 | 1946 | """ |
|
1947 | 1947 | opts = pycompat.byteskwargs(opts) |
|
1948 | 1948 | changesets += tuple(opts.get('rev', [])) |
|
1949 | 1949 | if not changesets: |
|
1950 | 1950 | changesets = ['.'] |
|
1951 | 1951 | repo = scmutil.unhidehashlikerevs(repo, changesets, 'nowarn') |
|
1952 | 1952 | revs = scmutil.revrange(repo, changesets) |
|
1953 | 1953 | if not revs: |
|
1954 | 1954 | raise error.Abort(_("export requires at least one changeset")) |
|
1955 | 1955 | if len(revs) > 1: |
|
1956 | 1956 | ui.note(_('exporting patches:\n')) |
|
1957 | 1957 | else: |
|
1958 | 1958 | ui.note(_('exporting patch:\n')) |
|
1959 | 1959 | ui.pager('export') |
|
1960 | 1960 | cmdutil.export(repo, revs, fntemplate=opts.get('output'), |
|
1961 | 1961 | switch_parent=opts.get('switch_parent'), |
|
1962 | 1962 | opts=patch.diffallopts(ui, opts)) |
|
1963 | 1963 | |
|
1964 | 1964 | @command('files', |
|
1965 | 1965 | [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')), |
|
1966 | 1966 | ('0', 'print0', None, _('end filenames with NUL, for use with xargs')), |
|
1967 | 1967 | ] + walkopts + formatteropts + subrepoopts, |
|
1968 | 1968 | _('[OPTION]... [FILE]...'), cmdtype=readonly) |
|
1969 | 1969 | def files(ui, repo, *pats, **opts): |
|
1970 | 1970 | """list tracked files |
|
1971 | 1971 | |
|
1972 | 1972 | Print files under Mercurial control in the working directory or a

1973 | 1973 | specified revision, limited to the given files (excluding removed files).
|
1974 | 1974 | Files can be specified as filenames or filesets. |
|
1975 | 1975 | |
|
1976 | 1976 | If no files are given to match, this command prints the names |
|
1977 | 1977 | of all files under Mercurial control. |
|
1978 | 1978 | |
|
1979 | 1979 | .. container:: verbose |
|
1980 | 1980 | |
|
1981 | 1981 | Examples: |
|
1982 | 1982 | |
|
1983 | 1983 | - list all files under the current directory:: |
|
1984 | 1984 | |
|
1985 | 1985 | hg files . |
|
1986 | 1986 | |
|
1988 | 1988 | - show sizes and flags for the current revision::
|
1988 | 1988 | |
|
1989 | 1989 | hg files -vr . |
|
1990 | 1990 | |
|
1991 | 1991 | - list all files named README:: |
|
1992 | 1992 | |
|
1993 | 1993 | hg files -I "**/README" |
|
1994 | 1994 | |
|
1995 | 1995 | - list all binary files:: |
|
1996 | 1996 | |
|
1997 | 1997 | hg files "set:binary()" |
|
1998 | 1998 | |
|
1999 | 1999 | - find files containing a regular expression:: |
|
2000 | 2000 | |
|
2001 | 2001 | hg files "set:grep('bob')" |
|
2002 | 2002 | |
|
2003 | 2003 | - search tracked file contents with xargs and grep:: |
|
2004 | 2004 | |
|
2005 | 2005 | hg files -0 | xargs -0 grep foo |
|
2006 | 2006 | |
|
2007 | 2007 | See :hg:`help patterns` and :hg:`help filesets` for more information |
|
2008 | 2008 | on specifying file patterns. |
|
2009 | 2009 | |
|
2010 | 2010 | Returns 0 if a match is found, 1 otherwise. |
|
2011 | 2011 | |
|
2012 | 2012 | """ |
|
2013 | 2013 | |
|
2014 | 2014 | opts = pycompat.byteskwargs(opts) |
|
2015 | 2015 | rev = opts.get('rev') |
|
2016 | 2016 | if rev: |
|
2017 | 2017 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
2018 | 2018 | ctx = scmutil.revsingle(repo, rev, None) |
|
2019 | 2019 | |
|
2020 | 2020 | end = '\n' |
|
2021 | 2021 | if opts.get('print0'): |
|
2022 | 2022 | end = '\0' |
|
2023 | 2023 | fmt = '%s' + end |
|
2024 | 2024 | |
|
2025 | 2025 | m = scmutil.match(ctx, pats, opts) |
|
2026 | 2026 | ui.pager('files') |
|
2027 | 2027 | with ui.formatter('files', opts) as fm: |
|
2028 | 2028 | return cmdutil.files(ui, ctx, m, fm, fmt, opts.get('subrepos')) |
|
2029 | 2029 | |
|
2030 | 2030 | @command('^forget', walkopts, _('[OPTION]... FILE...'), inferrepo=True) |
|
2031 | 2031 | def forget(ui, repo, *pats, **opts): |
|
2032 | 2032 | """forget the specified files on the next commit |
|
2033 | 2033 | |
|
2034 | 2034 | Mark the specified files so they will no longer be tracked |
|
2035 | 2035 | after the next commit. |
|
2036 | 2036 | |
|
2037 | 2037 | This only removes files from the current branch, not from the |
|
2038 | 2038 | entire project history, and it does not delete them from the |
|
2039 | 2039 | working directory. |
|
2040 | 2040 | |
|
2041 | 2041 | To delete the file from the working directory, see :hg:`remove`. |
|
2042 | 2042 | |
|
2043 | 2043 | To undo a forget before the next commit, see :hg:`add`. |
|
2044 | 2044 | |
|
2045 | 2045 | .. container:: verbose |
|
2046 | 2046 | |
|
2047 | 2047 | Examples: |
|
2048 | 2048 | |
|
2049 | 2049 | - forget newly-added binary files:: |
|
2050 | 2050 | |
|
2051 | 2051 | hg forget "set:added() and binary()" |
|
2052 | 2052 | |
|
2053 | 2053 | - forget files that would be excluded by .hgignore:: |
|
2054 | 2054 | |
|
2055 | 2055 | hg forget "set:hgignore()" |
|
2056 | 2056 | |
|
2057 | 2057 | Returns 0 on success. |
|
2058 | 2058 | """ |
|
2059 | 2059 | |
|
2060 | 2060 | opts = pycompat.byteskwargs(opts) |
|
2061 | 2061 | if not pats: |
|
2062 | 2062 | raise error.Abort(_('no files specified')) |
|
2063 | 2063 | |
|
2064 | 2064 | m = scmutil.match(repo[None], pats, opts) |
|
2065 | 2065 | rejected = cmdutil.forget(ui, repo, m, prefix="", explicitonly=False)[0] |
|
2066 | 2066 | return 1 if rejected else 0
|
2067 | 2067 | |
|
2068 | 2068 | @command( |
|
2069 | 2069 | 'graft', |
|
2070 | 2070 | [('r', 'rev', [], _('revisions to graft'), _('REV')), |
|
2071 | 2071 | ('c', 'continue', False, _('resume interrupted graft')), |
|
2072 | 2072 | ('e', 'edit', False, _('invoke editor on commit messages')), |
|
2073 | 2073 | ('', 'log', None, _('append graft info to log message')), |
|
2074 | 2074 | ('f', 'force', False, _('force graft')), |
|
2075 | 2075 | ('D', 'currentdate', False, |
|
2076 | 2076 | _('record the current date as commit date')), |
|
2077 | 2077 | ('U', 'currentuser', False, |
|
2078 | 2078 | _('record the current user as committer'))]
|
2079 | 2079 | + commitopts2 + mergetoolopts + dryrunopts, |
|
2080 | 2080 | _('[OPTION]... [-r REV]... REV...')) |
|
2081 | 2081 | def graft(ui, repo, *revs, **opts): |
|
2082 | 2082 | '''copy changes from other branches onto the current branch |
|
2083 | 2083 | |
|
2084 | 2084 | This command uses Mercurial's merge logic to copy individual |
|
2085 | 2085 | changes from other branches without merging branches in the |
|
2086 | 2086 | history graph. This is sometimes known as 'backporting' or |
|
2087 | 2087 | 'cherry-picking'. By default, graft will copy user, date, and |
|
2088 | 2088 | description from the source changesets. |
|
2089 | 2089 | |
|
2090 | 2090 | Changesets that are ancestors of the current revision, that have |
|
2091 | 2091 | already been grafted, or that are merges will be skipped. |
|
2092 | 2092 | |
|
2093 | 2093 | If --log is specified, log messages will have a comment appended |
|
2094 | 2094 | of the form:: |
|
2095 | 2095 | |
|
2096 | 2096 | (grafted from CHANGESETHASH) |
|
2097 | 2097 | |
|
2098 | 2098 | If --force is specified, revisions will be grafted even if they |
|
2099 | 2099 | are already ancestors of, or have been grafted to, the destination. |
|
2100 | 2100 | This is useful when the revisions have since been backed out. |
|
2101 | 2101 | |
|
2102 | 2102 | If a graft merge results in conflicts, the graft process is |
|
2103 | 2103 | interrupted so that the current merge can be manually resolved. |
|
2104 | 2104 | Once all conflicts are addressed, the graft process can be |
|
2105 | 2105 | continued with the -c/--continue option. |
|
2106 | 2106 | |
|
2107 | 2107 | .. note:: |
|
2108 | 2108 | |
|
2109 | 2109 | The -c/--continue option does not reapply earlier options, except |
|
2110 | 2110 | for --force. |
|
2111 | 2111 | |
|
2112 | 2112 | .. container:: verbose |
|
2113 | 2113 | |
|
2114 | 2114 | Examples: |
|
2115 | 2115 | |
|
2116 | 2116 | - copy a single change to the stable branch and edit its description:: |
|
2117 | 2117 | |
|
2118 | 2118 | hg update stable |
|
2119 | 2119 | hg graft --edit 9393 |
|
2120 | 2120 | |
|
2121 | 2121 | - graft a range of changesets with one exception, updating dates:: |
|
2122 | 2122 | |
|
2123 | 2123 | hg graft -D "2085::2093 and not 2091" |
|
2124 | 2124 | |
|
2125 | 2125 | - continue a graft after resolving conflicts:: |
|
2126 | 2126 | |
|
2127 | 2127 | hg graft -c |
|
2128 | 2128 | |
|
2129 | 2129 | - show the source of a grafted changeset:: |
|
2130 | 2130 | |
|
2131 | 2131 | hg log --debug -r . |
|
2132 | 2132 | |
|
2133 | 2133 | - show revisions sorted by date:: |
|
2134 | 2134 | |
|
2135 | 2135 | hg log -r "sort(all(), date)" |
|
2136 | 2136 | |
|
2137 | 2137 | See :hg:`help revisions` for more about specifying revisions. |
|
2138 | 2138 | |
|
2139 | 2139 | Returns 0 on successful completion. |
|
2140 | 2140 | ''' |
|
2141 | 2141 | with repo.wlock(): |
|
2142 | 2142 | return _dograft(ui, repo, *revs, **opts) |
|
2143 | 2143 | |
|
2144 | 2144 | def _dograft(ui, repo, *revs, **opts): |
|
2145 | 2145 | opts = pycompat.byteskwargs(opts) |
|
2146 | 2146 | if revs and opts.get('rev'): |
|
2147 | 2147 | ui.warn(_('warning: inconsistent use of --rev might give unexpected ' |
|
2148 | 2148 | 'revision ordering!\n')) |
|
2149 | 2149 | |
|
2150 | 2150 | revs = list(revs) |
|
2151 | 2151 | revs.extend(opts.get('rev')) |
|
2152 | 2152 | |
|
2153 | 2153 | if not opts.get('user') and opts.get('currentuser'): |
|
2154 | 2154 | opts['user'] = ui.username() |
|
2155 | 2155 | if not opts.get('date') and opts.get('currentdate'): |
|
2156 | 2156 | opts['date'] = "%d %d" % util.makedate() |
|
2157 | 2157 | |
|
2158 | 2158 | editor = cmdutil.getcommiteditor(editform='graft', |
|
2159 | 2159 | **pycompat.strkwargs(opts)) |
|
2160 | 2160 | |
|
2161 | 2161 | cont = False |
|
2162 | 2162 | if opts.get('continue'): |
|
2163 | 2163 | cont = True |
|
2164 | 2164 | if revs: |
|
2165 | 2165 | raise error.Abort(_("can't specify --continue and revisions")) |
|
2166 | 2166 | # read in unfinished revisions |
|
2167 | 2167 | try: |
|
2168 | 2168 | nodes = repo.vfs.read('graftstate').splitlines() |
|
2169 | 2169 | revs = [repo[node].rev() for node in nodes] |
|
2170 | 2170 | except IOError as inst: |
|
2171 | 2171 | if inst.errno != errno.ENOENT: |
|
2172 | 2172 | raise |
|
2173 | 2173 | cmdutil.wrongtooltocontinue(repo, _('graft')) |
|
2174 | 2174 | else: |
|
2175 | 2175 | cmdutil.checkunfinished(repo) |
|
2176 | 2176 | cmdutil.bailifchanged(repo) |
|
2177 | 2177 | if not revs: |
|
2178 | 2178 | raise error.Abort(_('no revisions specified')) |
|
2179 | 2179 | revs = scmutil.revrange(repo, revs) |
|
2180 | 2180 | |
|
2181 | 2181 | skipped = set() |
|
2182 | 2182 | # check for merges |
|
2183 | 2183 | for rev in repo.revs('%ld and merge()', revs): |
|
2184 | 2184 | ui.warn(_('skipping ungraftable merge revision %d\n') % rev) |
|
2185 | 2185 | skipped.add(rev) |
|
2186 | 2186 | revs = [r for r in revs if r not in skipped] |
|
2187 | 2187 | if not revs: |
|
2188 | 2188 | return -1 |
|
2189 | 2189 | |
|
2190 | 2190 | # Don't check in the --continue case, in effect retaining --force across |
|
2191 | 2191 | # --continue invocations. That's because without --force, any revisions we decided to
|
2192 | 2192 | # skip would have been filtered out here, so they wouldn't have made their |
|
2193 | 2193 | # way to the graftstate. With --force, any revisions we would have otherwise |
|
2194 | 2194 | # skipped would not have been filtered out, and if they hadn't been applied |
|
2195 | 2195 | # already, they'd have been in the graftstate. |
|
2196 | 2196 | if not (cont or opts.get('force')): |
|
2197 | 2197 | # check for ancestors of dest branch |
|
2198 | 2198 | crev = repo['.'].rev() |
|
2199 | 2199 | ancestors = repo.changelog.ancestors([crev], inclusive=True) |
|
2200 | 2200 | # XXX make this lazy in the future |
|
2201 | 2201 | # don't mutate while iterating, create a copy |
|
2202 | 2202 | for rev in list(revs): |
|
2203 | 2203 | if rev in ancestors: |
|
2204 | 2204 | ui.warn(_('skipping ancestor revision %d:%s\n') % |
|
2205 | 2205 | (rev, repo[rev])) |
|
2206 | 2206 | # XXX remove on list is slow |
|
2207 | 2207 | revs.remove(rev) |
|
2208 | 2208 | if not revs: |
|
2209 | 2209 | return -1 |
|
2210 | 2210 | |
|
2211 | 2211 | # analyze revs for earlier grafts |
|
2212 | 2212 | ids = {} |
|
2213 | 2213 | for ctx in repo.set("%ld", revs): |
|
2214 | 2214 | ids[ctx.hex()] = ctx.rev() |
|
2215 | 2215 | n = ctx.extra().get('source') |
|
2216 | 2216 | if n: |
|
2217 | 2217 | ids[n] = ctx.rev() |
|
2218 | 2218 | |
|
2219 | 2219 | # check ancestors for earlier grafts |
|
2220 | 2220 | ui.debug('scanning for duplicate grafts\n') |
|
2221 | 2221 | |
|
2222 | 2222 | # The only changesets we can be sure don't contain grafts of any

2223 | 2223 | # revs are the ones that are common ancestors of *all* revs:
|
2224 | 2224 | for rev in repo.revs('only(%d,ancestor(%ld))', crev, revs): |
|
2225 | 2225 | ctx = repo[rev] |
|
2226 | 2226 | n = ctx.extra().get('source') |
|
2227 | 2227 | if n in ids: |
|
2228 | 2228 | try: |
|
2229 | 2229 | r = repo[n].rev() |
|
2230 | 2230 | except error.RepoLookupError: |
|
2231 | 2231 | r = None |
|
2232 | 2232 | if r in revs: |
|
2233 | 2233 | ui.warn(_('skipping revision %d:%s ' |
|
2234 | 2234 | '(already grafted to %d:%s)\n') |
|
2235 | 2235 | % (r, repo[r], rev, ctx)) |
|
2236 | 2236 | revs.remove(r) |
|
2237 | 2237 | elif ids[n] in revs: |
|
2238 | 2238 | if r is None: |
|
2239 | 2239 | ui.warn(_('skipping already grafted revision %d:%s ' |
|
2240 | 2240 | '(%d:%s also has unknown origin %s)\n') |
|
2241 | 2241 | % (ids[n], repo[ids[n]], rev, ctx, n[:12])) |
|
2242 | 2242 | else: |
|
2243 | 2243 | ui.warn(_('skipping already grafted revision %d:%s ' |
|
2244 | 2244 | '(%d:%s also has origin %d:%s)\n') |
|
2245 | 2245 | % (ids[n], repo[ids[n]], rev, ctx, r, n[:12])) |
|
2246 | 2246 | revs.remove(ids[n]) |
|
2247 | 2247 | elif ctx.hex() in ids: |
|
2248 | 2248 | r = ids[ctx.hex()] |
|
2249 | 2249 | ui.warn(_('skipping already grafted revision %d:%s ' |
|
2250 | 2250 | '(was grafted from %d:%s)\n') % |
|
2251 | 2251 | (r, repo[r], rev, ctx)) |
|
2252 | 2252 | revs.remove(r) |
|
2253 | 2253 | if not revs: |
|
2254 | 2254 | return -1 |
|
2255 | 2255 | |
|
2256 | 2256 | for pos, ctx in enumerate(repo.set("%ld", revs)): |
|
2257 | 2257 | desc = '%d:%s "%s"' % (ctx.rev(), ctx, |
|
2258 | 2258 | ctx.description().split('\n', 1)[0]) |
|
2259 | 2259 | names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node()) |
|
2260 | 2260 | if names: |
|
2261 | 2261 | desc += ' (%s)' % ' '.join(names) |
|
2262 | 2262 | ui.status(_('grafting %s\n') % desc) |
|
2263 | 2263 | if opts.get('dry_run'): |
|
2264 | 2264 | continue |
|
2265 | 2265 | |
|
2266 | 2266 | source = ctx.extra().get('source') |
|
2267 | 2267 | extra = {} |
|
2268 | 2268 | if source: |
|
2269 | 2269 | extra['source'] = source |
|
2270 | 2270 | extra['intermediate-source'] = ctx.hex() |
|
2271 | 2271 | else: |
|
2272 | 2272 | extra['source'] = ctx.hex() |
|
2273 | 2273 | user = ctx.user() |
|
2274 | 2274 | if opts.get('user'): |
|
2275 | 2275 | user = opts['user'] |
|
2276 | 2276 | date = ctx.date() |
|
2277 | 2277 | if opts.get('date'): |
|
2278 | 2278 | date = opts['date'] |
|
2279 | 2279 | message = ctx.description() |
|
2280 | 2280 | if opts.get('log'): |
|
2281 | 2281 | message += '\n(grafted from %s)' % ctx.hex() |
|
2282 | 2282 | |
|
2283 | 2283 | # we don't merge the first commit when continuing |
|
2284 | 2284 | if not cont: |
|
2285 | 2285 | # perform the graft merge with p1(rev) as 'ancestor' |
|
2286 | 2286 | try: |
|
2287 | 2287 | # ui.forcemerge is an internal variable, do not document |
|
2288 | 2288 | repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), |
|
2289 | 2289 | 'graft') |
|
2290 | 2290 | stats = mergemod.graft(repo, ctx, ctx.p1(), |
|
2291 | 2291 | ['local', 'graft']) |
|
2292 | 2292 | finally: |
|
2293 | 2293 | repo.ui.setconfig('ui', 'forcemerge', '', 'graft') |
|
2294 | 2294 | # report any conflicts |
|
2295 | 2295 | if stats and stats[3] > 0: |
|
2296 | 2296 | # write out state for --continue |
|
2297 | 2297 | nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]] |
|
2298 | 2298 | repo.vfs.write('graftstate', ''.join(nodelines)) |
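# reconstruct the flags the user will want on resume; note that
# 'extra' is reused as the hint suffix here, which is safe only
# because the Abort below is raised before the commit would have
# consumed the old extra dict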
|
2299 | 2299 | extra = '' |
|
2300 | 2300 | if opts.get('user'): |
|
2301 | 2301 | extra += ' --user %s' % util.shellquote(opts['user']) |
|
2302 | 2302 | if opts.get('date'): |
|
2303 | 2303 | extra += ' --date %s' % util.shellquote(opts['date']) |
|
2304 | 2304 | if opts.get('log'): |
|
2305 | 2305 | extra += ' --log' |
|
2306 | 2306 | hint = _("use 'hg resolve' and 'hg graft --continue%s'") % extra
|
2307 | 2307 | raise error.Abort( |
|
2308 | 2308 | _("unresolved conflicts, can't continue"), |
|
2309 | 2309 | hint=hint) |
|
2310 | 2310 | else: |
|
2311 | 2311 | cont = False |
|
2312 | 2312 | |
|
2313 | 2313 | # commit |
|
2314 | 2314 | node = repo.commit(text=message, user=user, |
|
2315 | 2315 | date=date, extra=extra, editor=editor) |
|
2316 | 2316 | if node is None: |
|
2317 | 2317 | ui.warn( |
|
2318 | 2318 | _('note: graft of %d:%s created no changes to commit\n') % |
|
2319 | 2319 | (ctx.rev(), ctx)) |
|
2320 | 2320 | |
|
2321 | 2321 | # remove state when we complete successfully |
|
2322 | 2322 | if not opts.get('dry_run'): |
|
2323 | 2323 | repo.vfs.unlinkpath('graftstate', ignoremissing=True) |
|
2324 | 2324 | |
|
2325 | 2325 | return 0 |
|
2326 | 2326 | |
|
2327 | 2327 | @command('grep', |
|
2328 | 2328 | [('0', 'print0', None, _('end fields with NUL')), |
|
2329 | 2329 | ('', 'all', None, _('print all revisions that match')), |
|
2330 | 2330 | ('a', 'text', None, _('treat all files as text')), |
|
2331 | 2331 | ('f', 'follow', None, |
|
2332 | 2332 | _('follow changeset history,' |
|
2333 | 2333 | ' or file history across copies and renames')), |
|
2334 | 2334 | ('i', 'ignore-case', None, _('ignore case when matching')), |
|
2335 | 2335 | ('l', 'files-with-matches', None, |
|
2336 | 2336 | _('print only filenames and revisions that match')), |
|
2337 | 2337 | ('n', 'line-number', None, _('print matching line numbers')), |
|
2338 | 2338 | ('r', 'rev', [], |
|
2339 | 2339 | _('only search files changed within revision range'), _('REV')), |
|
2340 | 2340 | ('u', 'user', None, _('list the author (long with -v)')), |
|
2341 | 2341 | ('d', 'date', None, _('list the date (short with -q)')), |
|
2342 | 2342 | ] + formatteropts + walkopts, |
|
2343 | 2343 | _('[OPTION]... PATTERN [FILE]...'), |
|
2344 | 2344 | inferrepo=True, cmdtype=readonly) |
|
2345 | 2345 | def grep(ui, repo, pattern, *pats, **opts): |
|
2346 | 2346 | """search revision history for a pattern in specified files |
|
2347 | 2347 | |
|
2348 | 2348 | Search revision history for a regular expression in the specified |
|
2349 | 2349 | files or the entire project. |
|
2350 | 2350 | |
|
2351 | 2351 | By default, grep prints the most recent revision number for each |
|
2352 | 2352 | file in which it finds a match. To get it to print every revision |
|
2353 | 2353 | that contains a change in match status ("-" for a match that becomes |
|
2354 | 2354 | a non-match, or "+" for a non-match that becomes a match), use the |
|
2355 | 2355 | --all flag. |
|
2356 | 2356 | |
|
2357 | 2357 | PATTERN can be any Python (roughly Perl-compatible) regular |
|
2358 | 2358 | expression. |
|
2359 | 2359 | |
|
2360 | 2360 | If no FILEs are specified (and -f/--follow isn't set), all files in |
|
2361 | 2361 | the repository are searched, including those that don't exist in the |
|
2362 | 2362 | current branch or have been deleted in a prior changeset. |
|
2363 | 2363 | |
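.. container:: verbose

Examples:

- print the most recent revision of each file containing a pattern
(the pattern is illustrative)::

hg grep "TODO"

- print every revision where the match status of a line changed,
with line numbers::

hg grep --all -n "TODO"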
|
2364 | 2364 | Returns 0 if a match is found, 1 otherwise. |
|
2365 | 2365 | """ |
|
2366 | 2366 | opts = pycompat.byteskwargs(opts) |
|
2367 | 2367 | reflags = re.M |
|
2368 | 2368 | if opts.get('ignore_case'): |
|
2369 | 2369 | reflags |= re.I |
|
2370 | 2370 | try: |
|
2371 | 2371 | regexp = util.re.compile(pattern, reflags) |
|
2372 | 2372 | except re.error as inst: |
|
2373 | 2373 | ui.warn(_("grep: invalid match pattern: %s\n") % inst) |
|
2374 | 2374 | return 1 |
|
2375 | 2375 | sep, eol = ':', '\n' |
|
2376 | 2376 | if opts.get('print0'): |
|
2377 | 2377 | sep = eol = '\0' |
|
2378 | 2378 | |
|
2379 | 2379 | getfile = util.lrucachefunc(repo.file) |
|
2380 | 2380 | |
|
2381 | 2381 | def matchlines(body): |
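# scan the whole body for regexp matches, yielding
# (linenum, colstart, colend, line) with columns measured from
# the start of the matched line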
|
2382 | 2382 | begin = 0 |
|
2383 | 2383 | linenum = 0 |
|
2384 | 2384 | while begin < len(body): |
|
2385 | 2385 | match = regexp.search(body, begin) |
|
2386 | 2386 | if not match: |
|
2387 | 2387 | break |
|
2388 | 2388 | mstart, mend = match.span() |
|
2389 | 2389 | linenum += body.count('\n', begin, mstart) + 1 |
|
2390 | 2390 | lstart = body.rfind('\n', begin, mstart) + 1 or begin |
|
2391 | 2391 | begin = body.find('\n', mend) + 1 or len(body) + 1 |
|
2392 | 2392 | lend = begin - 1 |
|
2393 | 2393 | yield linenum, mstart - lstart, mend - lstart, body[lstart:lend] |
|
2394 | 2394 | |
|
2395 | 2395 | class linestate(object): |
|
2396 | 2396 | def __init__(self, line, linenum, colstart, colend): |
|
2397 | 2397 | self.line = line |
|
2398 | 2398 | self.linenum = linenum |
|
2399 | 2399 | self.colstart = colstart |
|
2400 | 2400 | self.colend = colend |
|
2401 | 2401 | |
|
2402 | 2402 | def __hash__(self): |
|
2403 | 2403 | return hash((self.linenum, self.line)) |
|
2404 | 2404 | |
|
2405 | 2405 | def __eq__(self, other): |
|
2406 | 2406 | return self.line == other.line |
|
2407 | 2407 | |
|
2408 | 2408 | def findpos(self): |
|
2409 | 2409 | """Iterate all (start, end) indices of matches""" |
|
2410 | 2410 | yield self.colstart, self.colend |
|
2411 | 2411 | p = self.colend |
|
2412 | 2412 | while p < len(self.line): |
|
2413 | 2413 | m = regexp.search(self.line, p) |
|
2414 | 2414 | if not m: |
|
2415 | 2415 | break |
|
2416 | 2416 | yield m.span() |
|
2417 | 2417 | p = m.end() |
|
2418 | 2418 | |
|
2419 | 2419 | matches = {} |
|
2420 | 2420 | copies = {} |
|
2421 | 2421 | def grepbody(fn, rev, body): |
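# record each matching line of fn@rev as a linestate in the
# per-revision match table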
|
2422 | 2422 | matches[rev].setdefault(fn, []) |
|
2423 | 2423 | m = matches[rev][fn] |
|
2424 | 2424 | for lnum, cstart, cend, line in matchlines(body): |
|
2425 | 2425 | s = linestate(line, lnum, cstart, cend) |
|
2426 | 2426 | m.append(s) |
|
2427 | 2427 | |
|
2428 | 2428 | def difflinestates(a, b): |
|
2429 | 2429 | sm = difflib.SequenceMatcher(None, a, b) |
|
2430 | 2430 | for tag, alo, ahi, blo, bhi in sm.get_opcodes(): |
|
2431 | 2431 | if tag == 'insert': |
|
2432 | 2432 | for i in xrange(blo, bhi): |
|
2433 | 2433 | yield ('+', b[i]) |
|
2434 | 2434 | elif tag == 'delete': |
|
2435 | 2435 | for i in xrange(alo, ahi): |
|
2436 | 2436 | yield ('-', a[i]) |
|
2437 | 2437 | elif tag == 'replace': |
|
2438 | 2438 | for i in xrange(alo, ahi): |
|
2439 | 2439 | yield ('-', a[i]) |
|
2440 | 2440 | for i in xrange(blo, bhi): |
|
2441 | 2441 | yield ('+', b[i]) |
|
2442 | 2442 | |
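# A self-contained sketch (plain strings stand in for linestate objects)
# of how difflinestates() turns SequenceMatcher opcodes into the "-"/"+"
# markers shown by --all:
#
#   >>> import difflib
#   >>> a = ['x = 1', 'y = 2']
#   >>> b = ['x = 1', 'y = 3']
#   >>> sm = difflib.SequenceMatcher(None, a, b)
#   >>> sm.get_opcodes()
#   [('equal', 0, 1, 0, 1), ('replace', 1, 2, 1, 2)]
#
# The 'replace' opcode yields ('-', 'y = 2') and then ('+', 'y = 3'):
# exactly one event per line whose match status changed.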
|
2443 | 2443 | def display(fm, fn, ctx, pstates, states): |
|
2444 | 2444 | rev = ctx.rev() |
|
2445 | 2445 | if fm.isplain(): |
|
2446 | 2446 | formatuser = ui.shortuser |
|
2447 | 2447 | else: |
|
2448 | 2448 | formatuser = str |
|
2449 | 2449 | if ui.quiet: |
|
2450 | 2450 | datefmt = '%Y-%m-%d' |
|
2451 | 2451 | else: |
|
2452 | 2452 | datefmt = '%a %b %d %H:%M:%S %Y %1%2' |
|
2453 | 2453 | found = False |
|
2454 | 2454 | @util.cachefunc |
|
2455 | 2455 | def binary(): |
|
2456 | 2456 | flog = getfile(fn) |
|
2457 | 2457 | return util.binary(flog.read(ctx.filenode(fn))) |
|
2458 | 2458 | |
|
2459 | 2459 | fieldnamemap = {'filename': 'file', 'linenumber': 'line_number'} |
|
2460 | 2460 | if opts.get('all'): |
|
2461 | 2461 | iter = difflinestates(pstates, states) |
|
2462 | 2462 | else: |
|
2463 | 2463 | iter = [('', l) for l in states] |
|
2464 | 2464 | for change, l in iter: |
|
2465 | 2465 | fm.startitem() |
|
2466 | 2466 | fm.data(node=fm.hexfunc(ctx.node())) |
|
2467 | 2467 | cols = [ |
|
2468 | 2468 | ('filename', fn, True), |
|
2469 | 2469 | ('rev', rev, True), |
|
2470 | 2470 | ('linenumber', l.linenum, opts.get('line_number')), |
|
2471 | 2471 | ] |
|
2472 | 2472 | if opts.get('all'): |
|
2473 | 2473 | cols.append(('change', change, True)) |
|
2474 | 2474 | cols.extend([ |
|
2475 | 2475 | ('user', formatuser(ctx.user()), opts.get('user')), |
|
2476 | 2476 | ('date', fm.formatdate(ctx.date(), datefmt), opts.get('date')), |
|
2477 | 2477 | ]) |
|
2478 | 2478 | lastcol = next(name for name, data, cond in reversed(cols) if cond) |
|
2479 | 2479 | for name, data, cond in cols: |
|
2480 | 2480 | field = fieldnamemap.get(name, name) |
|
2481 | 2481 | fm.condwrite(cond, field, '%s', data, label='grep.%s' % name) |
|
2482 | 2482 | if cond and name != lastcol: |
|
2483 | 2483 | fm.plain(sep, label='grep.sep') |
|
2484 | 2484 | if not opts.get('files_with_matches'): |
|
2485 | 2485 | fm.plain(sep, label='grep.sep') |
|
2486 | 2486 | if not opts.get('text') and binary(): |
|
2487 | 2487 | fm.plain(_(" Binary file matches")) |
|
2488 | 2488 | else: |
|
2489 | 2489 | displaymatches(fm.nested('texts'), l) |
|
2490 | 2490 | fm.plain(eol) |
|
2491 | 2491 | found = True |
|
2492 | 2492 | if opts.get('files_with_matches'): |
|
2493 | 2493 | break |
|
2494 | 2494 | return found |
|
2495 | 2495 | |
|
2496 | 2496 | def displaymatches(fm, l): |
|
2497 | 2497 | p = 0 |
|
2498 | 2498 | for s, e in l.findpos(): |
|
2499 | 2499 | if p < s: |
|
2500 | 2500 | fm.startitem() |
|
2501 | 2501 | fm.write('text', '%s', l.line[p:s]) |
|
2502 | 2502 | fm.data(matched=False) |
|
2503 | 2503 | fm.startitem() |
|
2504 | 2504 | fm.write('text', '%s', l.line[s:e], label='grep.match') |
|
2505 | 2505 | fm.data(matched=True) |
|
2506 | 2506 | p = e |
|
2507 | 2507 | if p < len(l.line): |
|
2508 | 2508 | fm.startitem() |
|
2509 | 2509 | fm.write('text', '%s', l.line[p:]) |
|
2510 | 2510 | fm.data(matched=False) |
|
2511 | 2511 | fm.end() |
|
2512 | 2512 | |
|
2513 | 2513 | skip = {} |
|
2514 | 2514 | revfiles = {} |
|
2515 | 2515 | match = scmutil.match(repo[None], pats, opts) |
|
2516 | 2516 | found = False |
|
2517 | 2517 | follow = opts.get('follow') |
|
2518 | 2518 | |
|
2519 | 2519 | def prep(ctx, fns): |
|
2520 | 2520 | rev = ctx.rev() |
|
2521 | 2521 | pctx = ctx.p1() |
|
2522 | 2522 | parent = pctx.rev() |
|
2523 | 2523 | matches.setdefault(rev, {}) |
|
2524 | 2524 | matches.setdefault(parent, {}) |
|
2525 | 2525 | files = revfiles.setdefault(rev, []) |
|
2526 | 2526 | for fn in fns: |
|
2527 | 2527 | flog = getfile(fn) |
|
2528 | 2528 | try: |
|
2529 | 2529 | fnode = ctx.filenode(fn) |
|
2530 | 2530 | except error.LookupError: |
|
2531 | 2531 | continue |
|
2532 | 2532 | |
|
2533 | 2533 | copied = flog.renamed(fnode) |
|
2534 | 2534 | copy = follow and copied and copied[0] |
|
2535 | 2535 | if copy: |
|
2536 | 2536 | copies.setdefault(rev, {})[fn] = copy |
|
2537 | 2537 | if fn in skip: |
|
2538 | 2538 | if copy: |
|
2539 | 2539 | skip[copy] = True |
|
2540 | 2540 | continue |
|
2541 | 2541 | files.append(fn) |
|
2542 | 2542 | |
|
2543 | 2543 | if fn not in matches[rev]: |
|
2544 | 2544 | grepbody(fn, rev, flog.read(fnode)) |
|
2545 | 2545 | |
|
2546 | 2546 | pfn = copy or fn |
|
2547 | 2547 | if pfn not in matches[parent]: |
|
2548 | 2548 | try: |
|
2549 | 2549 | fnode = pctx.filenode(pfn) |
|
2550 | 2550 | grepbody(pfn, parent, flog.read(fnode)) |
|
2551 | 2551 | except error.LookupError: |
|
2552 | 2552 | pass |
|
2553 | 2553 | |
|
2554 | 2554 | ui.pager('grep') |
|
2555 | 2555 | fm = ui.formatter('grep', opts) |
|
2556 | 2556 | for ctx in cmdutil.walkchangerevs(repo, match, opts, prep): |
|
2557 | 2557 | rev = ctx.rev() |
|
2558 | 2558 | parent = ctx.p1().rev() |
|
2559 | 2559 | for fn in sorted(revfiles.get(rev, [])): |
|
2560 | 2560 | states = matches[rev][fn] |
|
2561 | 2561 | copy = copies.get(rev, {}).get(fn) |
|
2562 | 2562 | if fn in skip: |
|
2563 | 2563 | if copy: |
|
2564 | 2564 | skip[copy] = True |
|
2565 | 2565 | continue |
|
2566 | 2566 | pstates = matches.get(parent, {}).get(copy or fn, []) |
|
2567 | 2567 | if pstates or states: |
|
2568 | 2568 | r = display(fm, fn, ctx, pstates, states) |
|
2569 | 2569 | found = found or r |
|
2570 | 2570 | if r and not opts.get('all'): |
|
2571 | 2571 | skip[fn] = True |
|
2572 | 2572 | if copy: |
|
2573 | 2573 | skip[copy] = True |
|
2574 | 2574 | del matches[rev] |
|
2575 | 2575 | del revfiles[rev] |
|
2576 | 2576 | fm.end() |
|
2577 | 2577 | |
|
2578 | 2578 | return not found |
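# Exit-status note: display() returns True once it has printed something,
# so "return not found" maps at least one match to exit code 0 and no
# matches to 1, as promised by the docstring.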
|
2579 | 2579 | |
|
2580 | 2580 | @command('heads', |
|
2581 | 2581 | [('r', 'rev', '', |
|
2582 | 2582 | _('show only heads which are descendants of STARTREV'), _('STARTREV')), |
|
2583 | 2583 | ('t', 'topo', False, _('show topological heads only')), |
|
2584 | 2584 | ('a', 'active', False, _('show active branchheads only (DEPRECATED)')), |
|
2585 | 2585 | ('c', 'closed', False, _('show normal and closed branch heads')), |
|
2586 | 2586 | ] + templateopts, |
|
2587 | 2587 | _('[-ct] [-r STARTREV] [REV]...'), cmdtype=readonly) |
|
2588 | 2588 | def heads(ui, repo, *branchrevs, **opts): |
|
2589 | 2589 | """show branch heads |
|
2590 | 2590 | |
|
2591 | 2591 | With no arguments, show all open branch heads in the repository. |
|
2592 | 2592 | Branch heads are changesets that have no descendants on the |
|
2593 | 2593 | same branch. They are where development generally takes place and |
|
2594 | 2594 | are the usual targets for update and merge operations. |
|
2595 | 2595 | |
|
2596 | 2596 | If one or more REVs are given, only open branch heads on the |
|
2597 | 2597 | branches associated with the specified changesets are shown. This |
|
2598 | 2598 | means that you can use :hg:`heads .` to see the heads on the |
|
2599 | 2599 | currently checked-out branch. |
|
2600 | 2600 | |
|
2601 | 2601 | If -c/--closed is specified, also show branch heads marked closed |
|
2602 | 2602 | (see :hg:`commit --close-branch`). |
|
2603 | 2603 | |
|
2604 | 2604 | If STARTREV is specified, only those heads that are descendants of |
|
2605 | 2605 | STARTREV will be displayed. |
|
2606 | 2606 | |
|
2607 | 2607 | If -t/--topo is specified, named branch mechanics will be ignored and only |
|
2608 | 2608 | topological heads (changesets with no children) will be shown. |
|
2609 | 2609 | |
|
2610 | 2610 | Returns 0 if matching heads are found, 1 if not. |
|
2611 | 2611 | """ |
|
2612 | 2612 | |
|
2613 | 2613 | opts = pycompat.byteskwargs(opts) |
|
2614 | 2614 | start = None |
|
2615 | 2615 | rev = opts.get('rev') |
|
2616 | 2616 | if rev: |
|
2617 | 2617 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
2618 | 2618 | start = scmutil.revsingle(repo, rev, None).node() |
|
2619 | 2619 | |
|
2620 | 2620 | if opts.get('topo'): |
|
2621 | 2621 | heads = [repo[h] for h in repo.heads(start)] |
|
2622 | 2622 | else: |
|
2623 | 2623 | heads = [] |
|
2624 | 2624 | for branch in repo.branchmap(): |
|
2625 | 2625 | heads += repo.branchheads(branch, start, opts.get('closed')) |
|
2626 | 2626 | heads = [repo[h] for h in heads] |
|
2627 | 2627 | |
|
2628 | 2628 | if branchrevs: |
|
2629 | 2629 | branches = set(repo[br].branch() for br in branchrevs) |
|
2630 | 2630 | heads = [h for h in heads if h.branch() in branches] |
|
2631 | 2631 | |
|
2632 | 2632 | if opts.get('active') and branchrevs: |
|
2633 | 2633 | dagheads = repo.heads(start) |
|
2634 | 2634 | heads = [h for h in heads if h.node() in dagheads] |
|
2635 | 2635 | |
|
2636 | 2636 | if branchrevs: |
|
2637 | 2637 | haveheads = set(h.branch() for h in heads) |
|
2638 | 2638 | if branches - haveheads: |
|
2639 | 2639 | headless = ', '.join(b for b in branches - haveheads) |
|
2640 | 2640 | msg = _('no open branch heads found on branches %s') |
|
2641 | 2641 | if opts.get('rev'): |
|
2642 | 2642 | msg += _(' (started at %s)') % opts['rev'] |
|
2643 | 2643 | ui.warn((msg + '\n') % headless) |
|
2644 | 2644 | |
|
2645 | 2645 | if not heads: |
|
2646 | 2646 | return 1 |
|
2647 | 2647 | |
|
2648 | 2648 | ui.pager('heads') |
|
2649 | 2649 | heads = sorted(heads, key=lambda x: -x.rev()) |
|
2650 | 2650 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
2651 | 2651 | for ctx in heads: |
|
2652 | 2652 | displayer.show(ctx) |
|
2653 | 2653 | displayer.close() |
|
2654 | 2654 | |
|
2655 | 2655 | @command('help', |
|
2656 | 2656 | [('e', 'extension', None, _('show only help for extensions')), |
|
2657 | 2657 | ('c', 'command', None, _('show only help for commands')), |
|
2658 | 2658 | ('k', 'keyword', None, _('show topics matching keyword')), |
|
2659 | 2659 | ('s', 'system', [], _('show help for specific platform(s)')), |
|
2660 | 2660 | ], |
|
2661 | 2661 | _('[-ecks] [TOPIC]'), |
|
2662 | 2662 | norepo=True, cmdtype=readonly) |
|
2663 | 2663 | def help_(ui, name=None, **opts): |
|
2664 | 2664 | """show help for a given topic or a help overview |
|
2665 | 2665 | |
|
2666 | 2666 | With no arguments, print a list of commands with short help messages. |
|
2667 | 2667 | |
|
2668 | 2668 | Given a topic, extension, or command name, print help for that |
|
2669 | 2669 | topic. |
|
2670 | 2670 | |
|
2671 | 2671 | Returns 0 if successful. |
|
2672 | 2672 | """ |
|
2673 | 2673 | |
|
2674 | 2674 | keep = opts.get(r'system') or [] |
|
2675 | 2675 | if len(keep) == 0: |
|
2676 | 2676 | if pycompat.sysplatform.startswith('win'): |
|
2677 | 2677 | keep.append('windows') |
|
2678 | 2678 | elif pycompat.sysplatform == 'OpenVMS': |
|
2679 | 2679 | keep.append('vms') |
|
2680 | 2680 | elif pycompat.sysplatform == 'plan9': |
|
2681 | 2681 | keep.append('plan9') |
|
2682 | 2682 | else: |
|
2683 | 2683 | keep.append('unix') |
|
2684 | 2684 | keep.append(pycompat.sysplatform.lower()) |
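# For example (values depend on the Python build, so treat these as
# illustrative): on Linux under Python 2, sysplatform is 'linux2', giving
# keep = ['unix', 'linux2']; on Windows it is 'win32', giving
# keep = ['windows', 'win32'].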
|
2685 | 2685 | if ui.verbose: |
|
2686 | 2686 | keep.append('verbose') |
|
2687 | 2687 | |
|
2688 | 2688 | commands = sys.modules[__name__] |
|
2689 | 2689 | formatted = help.formattedhelp(ui, commands, name, keep=keep, **opts) |
|
2690 | 2690 | ui.pager('help') |
|
2691 | 2691 | ui.write(formatted) |
|
2692 | 2692 | |
|
2693 | 2693 | |
|
2694 | 2694 | @command('identify|id', |
|
2695 | 2695 | [('r', 'rev', '', |
|
2696 | 2696 | _('identify the specified revision'), _('REV')), |
|
2697 | 2697 | ('n', 'num', None, _('show local revision number')), |
|
2698 | 2698 | ('i', 'id', None, _('show global revision id')), |
|
2699 | 2699 | ('b', 'branch', None, _('show branch')), |
|
2700 | 2700 | ('t', 'tags', None, _('show tags')), |
|
2701 | 2701 | ('B', 'bookmarks', None, _('show bookmarks')), |
|
2702 | 2702 | ] + remoteopts + formatteropts, |
|
2703 | 2703 | _('[-nibtB] [-r REV] [SOURCE]'), |
|
2704 | 2704 | optionalrepo=True, cmdtype=readonly) |
|
2705 | 2705 | def identify(ui, repo, source=None, rev=None, |
|
2706 | 2706 | num=None, id=None, branch=None, tags=None, bookmarks=None, **opts): |
|
2707 | 2707 | """identify the working directory or specified revision |
|
2708 | 2708 | |
|
2709 | 2709 | Print a summary identifying the repository state at REV using one or |
|
2710 | 2710 | two parent hash identifiers, followed by a "+" if the working |
|
2711 | 2711 | directory has uncommitted changes, the branch name (if not default), |
|
2712 | 2712 | a list of tags, and a list of bookmarks. |
|
2713 | 2713 | |
|
2714 | 2714 | When REV is not given, print a summary of the current state of the |
|
2715 | 2715 | repository, including the working directory. Specify -r. to get information
|
2716 | 2716 | about the working directory parent without scanning uncommitted changes.
|
2717 | 2717 | |
|
2718 | 2718 | Specifying a path to a repository root or Mercurial bundle will |
|
2719 | 2719 | cause lookup to operate on that repository/bundle. |
|
2720 | 2720 | |
|
2721 | 2721 | .. container:: verbose |
|
2722 | 2722 | |
|
2723 | 2723 | Examples: |
|
2724 | 2724 | |
|
2725 | 2725 | - generate a build identifier for the working directory:: |
|
2726 | 2726 | |
|
2727 | 2727 | hg id --id > build-id.dat |
|
2728 | 2728 | |
|
2729 | 2729 | - find the revision corresponding to a tag:: |
|
2730 | 2730 | |
|
2731 | 2731 | hg id -n -r 1.3 |
|
2732 | 2732 | |
|
2733 | 2733 | - check the most recent revision of a remote repository:: |
|
2734 | 2734 | |
|
2735 | 2735 | hg id -r tip https://www.mercurial-scm.org/repo/hg/ |
|
2736 | 2736 | |
|
2737 | 2737 | See :hg:`log` for generating more information about specific revisions, |
|
2738 | 2738 | including full hash identifiers. |
|
2739 | 2739 | |
|
2740 | 2740 | Returns 0 if successful. |
|
2741 | 2741 | """ |
|
2742 | 2742 | |
|
2743 | 2743 | opts = pycompat.byteskwargs(opts) |
|
2744 | 2744 | if not repo and not source: |
|
2745 | 2745 | raise error.Abort(_("there is no Mercurial repository here " |
|
2746 | 2746 | "(.hg not found)")) |
|
2747 | 2747 | |
|
2748 | 2748 | if ui.debugflag: |
|
2749 | 2749 | hexfunc = hex |
|
2750 | 2750 | else: |
|
2751 | 2751 | hexfunc = short |
|
2752 | 2752 | default = not (num or id or branch or tags or bookmarks) |
|
2753 | 2753 | output = [] |
|
2754 | 2754 | revs = [] |
|
2755 | 2755 | |
|
2756 | 2756 | if source: |
|
2757 | 2757 | source, branches = hg.parseurl(ui.expandpath(source)) |
|
2758 | 2758 | peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo |
|
2759 | 2759 | repo = peer.local() |
|
2760 | 2760 | revs, checkout = hg.addbranchrevs(repo, peer, branches, None) |
|
2761 | 2761 | |
|
2762 | 2762 | fm = ui.formatter('identify', opts) |
|
2763 | 2763 | fm.startitem() |
|
2764 | 2764 | |
|
2765 | 2765 | if not repo: |
|
2766 | 2766 | if num or branch or tags: |
|
2767 | 2767 | raise error.Abort( |
|
2768 | 2768 | _("can't query remote revision number, branch, or tags")) |
|
2769 | 2769 | if not rev and revs: |
|
2770 | 2770 | rev = revs[0] |
|
2771 | 2771 | if not rev: |
|
2772 | 2772 | rev = "tip" |
|
2773 | 2773 | |
|
2774 | 2774 | remoterev = peer.lookup(rev) |
|
2775 | 2775 | hexrev = hexfunc(remoterev) |
|
2776 | 2776 | if default or id: |
|
2777 | 2777 | output = [hexrev] |
|
2778 | 2778 | fm.data(id=hexrev) |
|
2779 | 2779 | |
|
2780 | 2780 | def getbms(): |
|
2781 | 2781 | bms = [] |
|
2782 | 2782 | |
|
2783 | 2783 | if 'bookmarks' in peer.listkeys('namespaces'): |
|
2784 | 2784 | hexremoterev = hex(remoterev) |
|
2785 | 2785 | bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems() |
|
2786 | 2786 | if bmr == hexremoterev] |
|
2787 | 2787 | |
|
2788 | 2788 | return sorted(bms) |
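# Shape of the data getbms() works on (hypothetical values):
# peer.listkeys('bookmarks') maps bookmark name -> hex node, e.g.
#
#   {'@': 'd5cbbe2c49ce...', 'stable': 'd5cbbe2c49ce...'}
#
# so the comprehension is a reverse lookup: every bookmark whose hex
# node equals the resolved remote revision.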
|
2789 | 2789 | |
|
2790 | 2790 | bms = getbms() |
|
2791 | 2791 | if bookmarks: |
|
2792 | 2792 | output.extend(bms) |
|
2793 | 2793 | elif default and not ui.quiet: |
|
2794 | 2794 | # multiple bookmarks for a single parent separated by '/' |
|
2795 | 2795 | bm = '/'.join(bms) |
|
2796 | 2796 | if bm: |
|
2797 | 2797 | output.append(bm) |
|
2798 | 2798 | |
|
2799 | 2799 | fm.data(node=hex(remoterev)) |
|
2800 | 2800 | fm.data(bookmarks=fm.formatlist(bms, name='bookmark')) |
|
2801 | 2801 | else: |
|
2802 | 2802 | if rev: |
|
2803 | 2803 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
2804 | 2804 | ctx = scmutil.revsingle(repo, rev, None) |
|
2805 | 2805 | |
|
2806 | 2806 | if ctx.rev() is None: |
|
2807 | 2807 | ctx = repo[None] |
|
2808 | 2808 | parents = ctx.parents() |
|
2809 | 2809 | taglist = [] |
|
2810 | 2810 | for p in parents: |
|
2811 | 2811 | taglist.extend(p.tags()) |
|
2812 | 2812 | |
|
2813 | 2813 | dirty = "" |
|
2814 | 2814 | if ctx.dirty(missing=True, merge=False, branch=False): |
|
2815 | 2815 | dirty = '+' |
|
2816 | 2816 | fm.data(dirty=dirty) |
|
2817 | 2817 | |
|
2818 | 2818 | hexoutput = [hexfunc(p.node()) for p in parents] |
|
2819 | 2819 | if default or id: |
|
2820 | 2820 | output = ["%s%s" % ('+'.join(hexoutput), dirty)] |
|
2821 | 2821 | fm.data(id="%s%s" % ('+'.join(hexoutput), dirty)) |
|
2822 | 2822 | |
|
2823 | 2823 | if num: |
|
2824 | 2824 | numoutput = ["%d" % p.rev() for p in parents] |
|
2825 | 2825 | output.append("%s%s" % ('+'.join(numoutput), dirty)) |
|
2826 | 2826 | |
|
2827 | 2827 | fn = fm.nested('parents') |
|
2828 | 2828 | for p in parents: |
|
2829 | 2829 | fn.startitem() |
|
2830 | 2830 | fn.data(rev=p.rev()) |
|
2831 | 2831 | fn.data(node=p.hex()) |
|
2832 | 2832 | fn.context(ctx=p) |
|
2833 | 2833 | fn.end() |
|
2834 | 2834 | else: |
|
2835 | 2835 | hexoutput = hexfunc(ctx.node()) |
|
2836 | 2836 | if default or id: |
|
2837 | 2837 | output = [hexoutput] |
|
2838 | 2838 | fm.data(id=hexoutput) |
|
2839 | 2839 | |
|
2840 | 2840 | if num: |
|
2841 | 2841 | output.append(pycompat.bytestr(ctx.rev())) |
|
2842 | 2842 | taglist = ctx.tags() |
|
2843 | 2843 | |
|
2844 | 2844 | if default and not ui.quiet: |
|
2845 | 2845 | b = ctx.branch() |
|
2846 | 2846 | if b != 'default': |
|
2847 | 2847 | output.append("(%s)" % b) |
|
2848 | 2848 | |
|
2849 | 2849 | # multiple tags for a single parent separated by '/' |
|
2850 | 2850 | t = '/'.join(taglist) |
|
2851 | 2851 | if t: |
|
2852 | 2852 | output.append(t) |
|
2853 | 2853 | |
|
2854 | 2854 | # multiple bookmarks for a single parent separated by '/' |
|
2855 | 2855 | bm = '/'.join(ctx.bookmarks()) |
|
2856 | 2856 | if bm: |
|
2857 | 2857 | output.append(bm) |
|
2858 | 2858 | else: |
|
2859 | 2859 | if branch: |
|
2860 | 2860 | output.append(ctx.branch()) |
|
2861 | 2861 | |
|
2862 | 2862 | if tags: |
|
2863 | 2863 | output.extend(taglist) |
|
2864 | 2864 | |
|
2865 | 2865 | if bookmarks: |
|
2866 | 2866 | output.extend(ctx.bookmarks()) |
|
2867 | 2867 | |
|
2868 | 2868 | fm.data(node=ctx.hex()) |
|
2869 | 2869 | fm.data(branch=ctx.branch()) |
|
2870 | 2870 | fm.data(tags=fm.formatlist(taglist, name='tag', sep=':')) |
|
2871 | 2871 | fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name='bookmark')) |
|
2872 | 2872 | fm.context(ctx=ctx) |
|
2873 | 2873 | |
|
2874 | 2874 | fm.plain("%s\n" % ' '.join(output)) |
|
2875 | 2875 | fm.end() |
|
2876 | 2876 | |
|
2877 | 2877 | @command('import|patch', |
|
2878 | 2878 | [('p', 'strip', 1, |
|
2879 | 2879 | _('directory strip option for patch. This has the same ' |
|
2880 | 2880 | 'meaning as the corresponding patch option'), _('NUM')), |
|
2881 | 2881 | ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')), |
|
2882 | 2882 | ('e', 'edit', False, _('invoke editor on commit messages')), |
|
2883 | 2883 | ('f', 'force', None, |
|
2884 | 2884 | _('skip check for outstanding uncommitted changes (DEPRECATED)')), |
|
2885 | 2885 | ('', 'no-commit', None, |
|
2886 | 2886 | _("don't commit, just update the working directory")), |
|
2887 | 2887 | ('', 'bypass', None, |
|
2888 | 2888 | _("apply patch without touching the working directory")), |
|
2889 | 2889 | ('', 'partial', None, |
|
2890 | 2890 | _('commit even if some hunks fail')), |
|
2891 | 2891 | ('', 'exact', None, |
|
2892 | 2892 | _('abort if patch would apply lossily')), |
|
2893 | 2893 | ('', 'prefix', '', |
|
2894 | 2894 | _('apply patch to subdirectory'), _('DIR')), |
|
2895 | 2895 | ('', 'import-branch', None, |
|
2896 | 2896 | _('use any branch information in patch (implied by --exact)'))] + |
|
2897 | 2897 | commitopts + commitopts2 + similarityopts, |
|
2898 | 2898 | _('[OPTION]... PATCH...')) |
|
2899 | 2899 | def import_(ui, repo, patch1=None, *patches, **opts): |
|
2900 | 2900 | """import an ordered set of patches |
|
2901 | 2901 | |
|
2902 | 2902 | Import a list of patches and commit them individually (unless |
|
2903 | 2903 | --no-commit is specified). |
|
2904 | 2904 | |
|
2905 | 2905 | To read a patch from standard input (stdin), use "-" as the patch |
|
2906 | 2906 | name. If a URL is specified, the patch will be downloaded from |
|
2907 | 2907 | there. |
|
2908 | 2908 | |
|
2909 | 2909 | Import first applies changes to the working directory (unless |
|
2910 | 2910 | --bypass is specified); import will abort if there are outstanding
|
2911 | 2911 | changes. |
|
2912 | 2912 | |
|
2913 | 2913 | Use --bypass to apply and commit patches directly to the |
|
2914 | 2914 | repository, without affecting the working directory. Without |
|
2915 | 2915 | --exact, patches will be applied on top of the working directory |
|
2916 | 2916 | parent revision. |
|
2917 | 2917 | |
|
2918 | 2918 | You can import a patch straight from a mail message. Even patches |
|
2919 | 2919 | as attachments work (to use the body part, it must have type |
|
2920 | 2920 | text/plain or text/x-patch). The From and Subject headers of the email
|
2921 | 2921 | message are used as the default committer and commit message. All
|
2922 | 2922 | text/plain body parts before the first diff are added to the commit
|
2923 | 2923 | message. |
|
2924 | 2924 | |
|
2925 | 2925 | If the imported patch was generated by :hg:`export`, user and |
|
2926 | 2926 | description from patch override values from message headers and |
|
2927 | 2927 | body. Values given on command line with -m/--message and -u/--user |
|
2928 | 2928 | override these. |
|
2929 | 2929 | |
|
2930 | 2930 | If --exact is specified, import will set the working directory to |
|
2931 | 2931 | the parent of each patch before applying it, and will abort if the |
|
2932 | 2932 | resulting changeset has a different ID than the one recorded in |
|
2933 | 2933 | the patch. This will guard against various ways that portable |
|
2934 | 2934 | patch formats and mail systems might fail to transfer Mercurial |
|
2935 | 2935 | data or metadata. See :hg:`bundle` for lossless transmission. |
|
2936 | 2936 | |
|
2937 | 2937 | Use --partial to ensure a changeset will be created from the patch |
|
2938 | 2938 | even if some hunks fail to apply. Hunks that fail to apply will be |
|
2939 | 2939 | written to a <target-file>.rej file. Conflicts can then be resolved |
|
2940 | 2940 | by hand before :hg:`commit --amend` is run to update the created |
|
2941 | 2941 | changeset. This flag exists to let people import patches that |
|
2942 | 2942 | partially apply without losing the associated metadata (author, |
|
2943 | 2943 | date, description, ...). |
|
2944 | 2944 | |
|
2945 | 2945 | .. note:: |
|
2946 | 2946 | |
|
2947 | 2947 | When no hunks apply cleanly, :hg:`import --partial` will create |
|
2948 | 2948 | an empty changeset, importing only the patch metadata. |
|
2949 | 2949 | |
|
2950 | 2950 | With -s/--similarity, hg will attempt to discover renames and |
|
2951 | 2951 | copies in the patch in the same way as :hg:`addremove`. |
|
2952 | 2952 | |
|
2953 | 2953 | It is possible to use external patch programs to perform the patch |
|
2954 | 2954 | by setting the ``ui.patch`` configuration option. For the default |
|
2955 | 2955 | internal tool, the fuzz can also be configured via ``patch.fuzz``. |
|
2956 | 2956 | See :hg:`help config` for more information about configuration |
|
2957 | 2957 | files and how to use these options. |
|
2958 | 2958 | |
|
2959 | 2959 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
2960 | 2960 | |
|
2961 | 2961 | .. container:: verbose |
|
2962 | 2962 | |
|
2963 | 2963 | Examples: |
|
2964 | 2964 | |
|
2965 | 2965 | - import a traditional patch from a website and detect renames:: |
|
2966 | 2966 | |
|
2967 | 2967 | hg import -s 80 http://example.com/bugfix.patch |
|
2968 | 2968 | |
|
2969 | 2969 | - import a changeset from an hgweb server:: |
|
2970 | 2970 | |
|
2971 | 2971 | hg import https://www.mercurial-scm.org/repo/hg/rev/5ca8c111e9aa |
|
2972 | 2972 | |
|
2973 | 2973 | - import all the patches in a Unix-style mbox::
|
2974 | 2974 | |
|
2975 | 2975 | hg import incoming-patches.mbox |
|
2976 | 2976 | |
|
2977 | 2977 | - import patches from stdin:: |
|
2978 | 2978 | |
|
2979 | 2979 | hg import - |
|
2980 | 2980 | |
|
2981 | 2981 | - attempt to exactly restore an exported changeset (not always |
|
2982 | 2982 | possible):: |
|
2983 | 2983 | |
|
2984 | 2984 | hg import --exact proposed-fix.patch |
|
2985 | 2985 | |
|
2986 | 2986 | - use an external tool to apply a patch which is too fuzzy for |
|
2987 | 2987 | the default internal tool::
|
2988 | 2988 | |
|
2989 | 2989 | hg import --config ui.patch="patch --merge" fuzzy.patch |
|
2990 | 2990 | |
|
2991 | 2991 | - change the default fuzz from 2 to a less strict 7::
|
2992 | 2992 | |
|
2993 | 2993 | hg import --config patch.fuzz=7 fuzz.patch
|
2994 | 2994 | |
|
2995 | 2995 | Returns 0 on success, 1 on partial success (see --partial). |
|
2996 | 2996 | """ |
|
2997 | 2997 | |
|
2998 | 2998 | opts = pycompat.byteskwargs(opts) |
|
2999 | 2999 | if not patch1: |
|
3000 | 3000 | raise error.Abort(_('need at least one patch to import')) |
|
3001 | 3001 | |
|
3002 | 3002 | patches = (patch1,) + patches |
|
3003 | 3003 | |
|
3004 | 3004 | date = opts.get('date') |
|
3005 | 3005 | if date: |
|
3006 | 3006 | opts['date'] = util.parsedate(date) |
|
3007 | 3007 | |
|
3008 | 3008 | exact = opts.get('exact') |
|
3009 | 3009 | update = not opts.get('bypass') |
|
3010 | 3010 | if not update and opts.get('no_commit'): |
|
3011 | 3011 | raise error.Abort(_('cannot use --no-commit with --bypass')) |
|
3012 | 3012 | try: |
|
3013 | 3013 | sim = float(opts.get('similarity') or 0) |
|
3014 | 3014 | except ValueError: |
|
3015 | 3015 | raise error.Abort(_('similarity must be a number')) |
|
3016 | 3016 | if sim < 0 or sim > 100: |
|
3017 | 3017 | raise error.Abort(_('similarity must be between 0 and 100')) |
|
3018 | 3018 | if sim and not update: |
|
3019 | 3019 | raise error.Abort(_('cannot use --similarity with --bypass')) |
|
3020 | 3020 | if exact: |
|
3021 | 3021 | if opts.get('edit'): |
|
3022 | 3022 | raise error.Abort(_('cannot use --exact with --edit')) |
|
3023 | 3023 | if opts.get('prefix'): |
|
3024 | 3024 | raise error.Abort(_('cannot use --exact with --prefix')) |
|
3025 | 3025 | |
|
3026 | 3026 | base = opts["base"] |
|
3027 | 3027 | wlock = dsguard = lock = tr = None |
|
3028 | 3028 | msgs = [] |
|
3029 | 3029 | ret = 0 |
|
3030 | 3030 | |
|
3031 | 3031 | |
|
3032 | 3032 | try: |
|
3033 | 3033 | wlock = repo.wlock() |
|
3034 | 3034 | |
|
3035 | 3035 | if update: |
|
3036 | 3036 | cmdutil.checkunfinished(repo) |
|
3037 | 3037 | if (exact or not opts.get('force')): |
|
3038 | 3038 | cmdutil.bailifchanged(repo) |
|
3039 | 3039 | |
|
3040 | 3040 | if not opts.get('no_commit'): |
|
3041 | 3041 | lock = repo.lock() |
|
3042 | 3042 | tr = repo.transaction('import') |
|
3043 | 3043 | else: |
|
3044 | 3044 | dsguard = dirstateguard.dirstateguard(repo, 'import') |
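# Design note: with --no-commit only the dirstate changes, so a dirstate
# guard (closed on success, backed out via release() in the finally block)
# suffices; a committing import needs the store lock and a real
# transaction instead.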
|
3045 | 3045 | parents = repo[None].parents() |
|
3046 | 3046 | for patchurl in patches: |
|
3047 | 3047 | if patchurl == '-': |
|
3048 | 3048 | ui.status(_('applying patch from stdin\n')) |
|
3049 | 3049 | patchfile = ui.fin |
|
3050 | 3050 | patchurl = 'stdin' # for error message |
|
3051 | 3051 | else: |
|
3052 | 3052 | patchurl = os.path.join(base, patchurl) |
|
3053 | 3053 | ui.status(_('applying %s\n') % patchurl) |
|
3054 | 3054 | patchfile = hg.openpath(ui, patchurl) |
|
3055 | 3055 | |
|
3056 | 3056 | haspatch = False |
|
3057 | 3057 | for hunk in patch.split(patchfile): |
|
3058 | 3058 | (msg, node, rej) = cmdutil.tryimportone(ui, repo, hunk, |
|
3059 | 3059 | parents, opts, |
|
3060 | 3060 | msgs, hg.clean) |
|
3061 | 3061 | if msg: |
|
3062 | 3062 | haspatch = True |
|
3063 | 3063 | ui.note(msg + '\n') |
|
3064 | 3064 | if update or exact: |
|
3065 | 3065 | parents = repo[None].parents() |
|
3066 | 3066 | else: |
|
3067 | 3067 | parents = [repo[node]] |
|
3068 | 3068 | if rej: |
|
3069 | 3069 | ui.write_err(_("patch applied partially\n")) |
|
3070 | 3070 | ui.write_err(_("(fix the .rej files and run " |
|
3071 | 3071 | "`hg commit --amend`)\n")) |
|
3072 | 3072 | ret = 1 |
|
3073 | 3073 | break |
|
3074 | 3074 | |
|
3075 | 3075 | if not haspatch: |
|
3076 | 3076 | raise error.Abort(_('%s: no diffs found') % patchurl) |
|
3077 | 3077 | |
|
3078 | 3078 | if tr: |
|
3079 | 3079 | tr.close() |
|
3080 | 3080 | if msgs: |
|
3081 | 3081 | repo.savecommitmessage('\n* * *\n'.join(msgs)) |
|
3082 | 3082 | if dsguard: |
|
3083 | 3083 | dsguard.close() |
|
3084 | 3084 | return ret |
|
3085 | 3085 | finally: |
|
3086 | 3086 | if tr: |
|
3087 | 3087 | tr.release() |
|
3088 | 3088 | release(lock, dsguard, wlock) |
|
3089 | 3089 | |
|
3090 | 3090 | @command('incoming|in', |
|
3091 | 3091 | [('f', 'force', None, |
|
3092 | 3092 | _('run even if remote repository is unrelated')), |
|
3093 | 3093 | ('n', 'newest-first', None, _('show newest record first')), |
|
3094 | 3094 | ('', 'bundle', '', |
|
3095 | 3095 | _('file to store the bundles into'), _('FILE')), |
|
3096 | 3096 | ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')), |
|
3097 | 3097 | ('B', 'bookmarks', False, _("compare bookmarks")), |
|
3098 | 3098 | ('b', 'branch', [], |
|
3099 | 3099 | _('a specific branch you would like to pull'), _('BRANCH')), |
|
3100 | 3100 | ] + logopts + remoteopts + subrepoopts, |
|
3101 | 3101 | _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]')) |
|
3102 | 3102 | def incoming(ui, repo, source="default", **opts): |
|
3103 | 3103 | """show new changesets found in source |
|
3104 | 3104 | |
|
3105 | 3105 | Show new changesets found in the specified path/URL or the default |
|
3106 | 3106 | pull location. These are the changesets that would have been pulled |
|
3107 | 3107 | by :hg:`pull` at the time you issued this command. |
|
3108 | 3108 | |
|
3109 | 3109 | See pull for valid source format details. |
|
3110 | 3110 | |
|
3111 | 3111 | .. container:: verbose |
|
3112 | 3112 | |
|
3113 | 3113 | With -B/--bookmarks, the result of bookmark comparison between |
|
3114 | 3114 | local and remote repositories is displayed. With -v/--verbose, |
|
3115 | 3115 | status is also displayed for each bookmark like below:: |
|
3116 | 3116 | |
|
3117 | 3117 | BM1 01234567890a added |
|
3118 | 3118 | BM2 1234567890ab advanced |
|
3119 | 3119 | BM3 234567890abc diverged |
|
3120 | 3120 | BM4 34567890abcd changed |
|
3121 | 3121 | |
|
3122 | 3122 | The action taken locally when pulling depends on the |
|
3123 | 3123 | status of each bookmark: |
|
3124 | 3124 | |
|
3125 | 3125 | :``added``: pull will create it |
|
3126 | 3126 | :``advanced``: pull will update it |
|
3127 | 3127 | :``diverged``: pull will create a divergent bookmark |
|
3128 | 3128 | :``changed``: result depends on remote changesets |
|
3129 | 3129 | |
|
3130 | 3130 | From the point of view of pulling behavior, bookmarks
|
3131 | 3131 | existing only in the remote repository are treated as ``added``,
|
3132 | 3132 | even if they are in fact locally deleted.
|
3133 | 3133 | |
|
3134 | 3134 | .. container:: verbose |
|
3135 | 3135 | |
|
3136 | 3136 | For remote repository, using --bundle avoids downloading the |
|
3137 | 3137 | changesets twice if the incoming is followed by a pull. |
|
3138 | 3138 | |
|
3139 | 3139 | Examples: |
|
3140 | 3140 | |
|
3141 | 3141 | - show incoming changes with patches and full description:: |
|
3142 | 3142 | |
|
3143 | 3143 | hg incoming -vp |
|
3144 | 3144 | |
|
3145 | 3145 | - show incoming changes excluding merges, store a bundle:: |
|
3146 | 3146 | |
|
3147 | 3147 | hg in -vpM --bundle incoming.hg |
|
3148 | 3148 | hg pull incoming.hg |
|
3149 | 3149 | |
|
3150 | 3150 | - briefly list changes inside a bundle:: |
|
3151 | 3151 | |
|
3152 | 3152 | hg in changes.hg -T "{desc|firstline}\\n" |
|
3153 | 3153 | |
|
3154 | 3154 | Returns 0 if there are incoming changes, 1 otherwise. |
|
3155 | 3155 | """ |
|
3156 | 3156 | opts = pycompat.byteskwargs(opts) |
|
3157 | 3157 | if opts.get('graph'): |
|
3158 | 3158 | logcmdutil.checkunsupportedgraphflags([], opts) |
|
3159 | 3159 | def display(other, chlist, displayer): |
|
3160 | 3160 | revdag = logcmdutil.graphrevs(other, chlist, opts) |
|
3161 | 3161 | logcmdutil.displaygraph(ui, repo, revdag, displayer, |
|
3162 | 3162 | graphmod.asciiedges) |
|
3163 | 3163 | |
|
3164 | 3164 | hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True) |
|
3165 | 3165 | return 0 |
|
3166 | 3166 | |
|
3167 | 3167 | if opts.get('bundle') and opts.get('subrepos'): |
|
3168 | 3168 | raise error.Abort(_('cannot combine --bundle and --subrepos')) |
|
3169 | 3169 | |
|
3170 | 3170 | if opts.get('bookmarks'): |
|
3171 | 3171 | source, branches = hg.parseurl(ui.expandpath(source), |
|
3172 | 3172 | opts.get('branch')) |
|
3173 | 3173 | other = hg.peer(repo, opts, source) |
|
3174 | 3174 | if 'bookmarks' not in other.listkeys('namespaces'): |
|
3175 | 3175 | ui.warn(_("remote doesn't support bookmarks\n")) |
|
3176 | 3176 | return 0 |
|
3177 | 3177 | ui.pager('incoming') |
|
3178 | 3178 | ui.status(_('comparing with %s\n') % util.hidepassword(source)) |
|
3179 | 3179 | return bookmarks.incoming(ui, repo, other) |
|
3180 | 3180 | |
|
3181 | 3181 | repo._subtoppath = ui.expandpath(source) |
|
3182 | 3182 | try: |
|
3183 | 3183 | return hg.incoming(ui, repo, source, opts) |
|
3184 | 3184 | finally: |
|
3185 | 3185 | del repo._subtoppath |
|
3186 | 3186 | |
|
3187 | 3187 | |
|
3188 | 3188 | @command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'), |
|
3189 | 3189 | norepo=True) |
|
3190 | 3190 | def init(ui, dest=".", **opts): |
|
3191 | 3191 | """create a new repository in the given directory |
|
3192 | 3192 | |
|
3193 | 3193 | Initialize a new repository in the given directory. If the given |
|
3194 | 3194 | directory does not exist, it will be created. |
|
3195 | 3195 | |
|
3196 | 3196 | If no directory is given, the current directory is used. |
|
3197 | 3197 | |
|
3198 | 3198 | It is possible to specify an ``ssh://`` URL as the destination. |
|
3199 | 3199 | See :hg:`help urls` for more information. |
|
3200 | 3200 | |
|
3201 | 3201 | Returns 0 on success. |
|
3202 | 3202 | """ |
|
3203 | 3203 | opts = pycompat.byteskwargs(opts) |
|
3204 | 3204 | hg.peer(ui, opts, ui.expandpath(dest), create=True) |
|
3205 | 3205 | |
|
3206 | 3206 | @command('locate', |
|
3207 | 3207 | [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')), |
|
3208 | 3208 | ('0', 'print0', None, _('end filenames with NUL, for use with xargs')), |
|
3209 | 3209 | ('f', 'fullpath', None, _('print complete paths from the filesystem root')), |
|
3210 | 3210 | ] + walkopts, |
|
3211 | 3211 | _('[OPTION]... [PATTERN]...')) |
|
3212 | 3212 | def locate(ui, repo, *pats, **opts): |
|
3213 | 3213 | """locate files matching specific patterns (DEPRECATED) |
|
3214 | 3214 | |
|
3215 | 3215 | Print files under Mercurial control in the working directory whose |
|
3216 | 3216 | names match the given patterns. |
|
3217 | 3217 | |
|
3218 | 3218 | By default, this command searches all directories in the working |
|
3219 | 3219 | directory. To search just the current directory and its |
|
3220 | 3220 | subdirectories, use "--include .". |
|
3221 | 3221 | |
|
3222 | 3222 | If no patterns are given to match, this command prints the names |
|
3223 | 3223 | of all files under Mercurial control in the working directory. |
|
3224 | 3224 | |
|
3225 | 3225 | If you want to feed the output of this command into the "xargs" |
|
3226 | 3226 | command, use the -0 option to both this command and "xargs". This |
|
3227 | 3227 | will avoid the problem of "xargs" treating single filenames that |
|
3228 | 3228 | contain whitespace as multiple filenames. |
|
3229 | 3229 | |
|
3230 | 3230 | See :hg:`help files` for a more versatile command. |
|
3231 | 3231 | |
|
3232 | 3232 | Returns 0 if a match is found, 1 otherwise. |
|
3233 | 3233 | """ |
|
3234 | 3234 | opts = pycompat.byteskwargs(opts) |
|
3235 | 3235 | if opts.get('print0'): |
|
3236 | 3236 | end = '\0' |
|
3237 | 3237 | else: |
|
3238 | 3238 | end = '\n' |
|
3239 | 3239 | rev = scmutil.revsingle(repo, opts.get('rev'), None).node() |
|
3240 | 3240 | |
|
3241 | 3241 | ret = 1 |
|
3242 | 3242 | ctx = repo[rev] |
|
3243 | 3243 | m = scmutil.match(ctx, pats, opts, default='relglob', |
|
3244 | 3244 | badfn=lambda x, y: False) |
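# Passing a no-op badfn suppresses the default "no such file" warnings,
# keeping locate quiet when a pattern matches nothing; the command then
# simply exits 1 via the ret value below.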
|
3245 | 3245 | |
|
3246 | 3246 | ui.pager('locate') |
|
3247 | 3247 | for abs in ctx.matches(m): |
|
3248 | 3248 | if opts.get('fullpath'): |
|
3249 | 3249 | ui.write(repo.wjoin(abs), end) |
|
3250 | 3250 | else: |
|
3251 | 3251 | ui.write(((pats and m.rel(abs)) or abs), end) |
|
3252 | 3252 | ret = 0 |
|
3253 | 3253 | |
|
3254 | 3254 | return ret |
|
3255 | 3255 | |
|
3256 | 3256 | @command('^log|history', |
|
3257 | 3257 | [('f', 'follow', None, |
|
3258 | 3258 | _('follow changeset history, or file history across copies and renames')), |
|
3259 | 3259 | ('', 'follow-first', None, |
|
3260 | 3260 | _('only follow the first parent of merge changesets (DEPRECATED)')), |
|
3261 | 3261 | ('d', 'date', '', _('show revisions matching date spec'), _('DATE')), |
|
3262 | 3262 | ('C', 'copies', None, _('show copied files')), |
|
3263 | 3263 | ('k', 'keyword', [], |
|
3264 | 3264 | _('do case-insensitive search for a given text'), _('TEXT')), |
|
3265 | 3265 | ('r', 'rev', [], _('show the specified revision or revset'), _('REV')), |
|
3266 | 3266 | ('L', 'line-range', [], |
|
3267 | 3267 | _('follow line range of specified file (EXPERIMENTAL)'), |
|
3268 | 3268 | _('FILE,RANGE')), |
|
3269 | 3269 | ('', 'removed', None, _('include revisions where files were removed')), |
|
3270 | 3270 | ('m', 'only-merges', None, _('show only merges (DEPRECATED)')), |
|
3271 | 3271 | ('u', 'user', [], _('revisions committed by user'), _('USER')), |
|
3272 | 3272 | ('', 'only-branch', [], |
|
3273 | 3273 | _('show only changesets within the given named branch (DEPRECATED)'), |
|
3274 | 3274 | _('BRANCH')), |
|
3275 | 3275 | ('b', 'branch', [], |
|
3276 | 3276 | _('show changesets within the given named branch'), _('BRANCH')), |
|
3277 | 3277 | ('P', 'prune', [], |
|
3278 | 3278 | _('do not display revision or any of its ancestors'), _('REV')), |
|
3279 | 3279 | ] + logopts + walkopts, |
|
3280 | 3280 | _('[OPTION]... [FILE]'), |
|
3281 | 3281 | inferrepo=True, cmdtype=readonly) |
|
3282 | 3282 | def log(ui, repo, *pats, **opts): |
|
3283 | 3283 | """show revision history of entire repository or files |
|
3284 | 3284 | |
|
3285 | 3285 | Print the revision history of the specified files or the entire |
|
3286 | 3286 | project. |
|
3287 | 3287 | |
|
3288 | 3288 | If no revision range is specified, the default is ``tip:0`` unless |
|
3289 | 3289 | --follow is set, in which case the working directory parent is |
|
3290 | 3290 | used as the starting revision. |
|
3291 | 3291 | |
|
3292 | 3292 | File history is shown without following rename or copy history of |
|
3293 | 3293 | files. Use -f/--follow with a filename to follow history across |
|
3294 | 3294 | renames and copies. --follow without a filename will only show |
|
3295 | 3295 | ancestors of the starting revision. |
|
3296 | 3296 | |
|
3297 | 3297 | By default this command prints revision number and changeset id, |
|
3298 | 3298 | tags, non-trivial parents, user, date and time, and a summary for |
|
3299 | 3299 | each commit. When the -v/--verbose switch is used, the list of |
|
3300 | 3300 | changed files and full commit message are shown. |
|
3301 | 3301 | |
|
3302 | 3302 | With --graph the revisions are shown as an ASCII art DAG with the most |
|
3303 | 3303 | recent changeset at the top. |
|
3304 | 3304 | 'o' is a changeset, '@' is a working directory parent, '_' closes a branch, |
|
3305 | 3305 | 'x' is obsolete, '*' is unstable, and '+' represents a fork where the |
|
3306 | 3306 | changeset from the lines below is a parent of the 'o' merge on the same |
|
3307 | 3307 | line. |
|
3308 | 3308 | Paths in the DAG are represented with '|', '/' and so forth. ':' in place |
|
3309 | 3309 | of a '|' indicates one or more revisions in a path are omitted. |
|
3310 | 3310 | |
|
3311 | 3311 | .. container:: verbose |
|
3312 | 3312 | |
|
3313 | 3313 | Use -L/--line-range FILE,M:N options to follow the history of lines |
|
3314 | 3314 | from M to N in FILE. With -p/--patch only diff hunks affecting |
|
3315 | 3315 | specified line range will be shown. This option requires --follow; |
|
3316 | 3316 | it can be specified multiple times. Currently, this option is not |
|
3317 | 3317 | compatible with --graph. This option is experimental. |
|
3318 | 3318 | |
|
3319 | 3319 | .. note:: |
|
3320 | 3320 | |
|
3321 | 3321 | :hg:`log --patch` may generate unexpected diff output for merge |
|
3322 | 3322 | changesets, as it will only compare the merge changeset against |
|
3323 | 3323 | its first parent. Also, only files different from BOTH parents |
|
3324 | 3324 | will appear in files:. |
|
3325 | 3325 | |
|
3326 | 3326 | .. note:: |
|
3327 | 3327 | |
|
3328 | 3328 | For performance reasons, :hg:`log FILE` may omit duplicate changes |
|
3329 | 3329 | made on branches and will not show removals or mode changes. To |
|
3330 | 3330 | see all such changes, use the --removed switch. |
|
3331 | 3331 | |
|
3332 | 3332 | .. container:: verbose |
|
3333 | 3333 | |
|
3334 | 3334 | .. note:: |
|
3335 | 3335 | |
|
3336 | 3336 | The history resulting from -L/--line-range options depends on diff |
|
3337 | 3337 | options; for instance if white-spaces are ignored, respective changes |
|
3338 | 3338 | with only white-spaces in specified line range will not be listed. |
|
3339 | 3339 | |
|
3340 | 3340 | .. container:: verbose |
|
3341 | 3341 | |
|
3342 | 3342 | Some examples: |
|
3343 | 3343 | |
|
3344 | 3344 | - changesets with full descriptions and file lists:: |
|
3345 | 3345 | |
|
3346 | 3346 | hg log -v |
|
3347 | 3347 | |
|
3348 | 3348 | - changesets ancestral to the working directory:: |
|
3349 | 3349 | |
|
3350 | 3350 | hg log -f |
|
3351 | 3351 | |
|
3352 | 3352 | - last 10 commits on the current branch:: |
|
3353 | 3353 | |
|
3354 | 3354 | hg log -l 10 -b . |
|
3355 | 3355 | |
|
3356 | 3356 | - changesets showing all modifications of a file, including removals:: |
|
3357 | 3357 | |
|
3358 | 3358 | hg log --removed file.c |
|
3359 | 3359 | |
|
3360 | 3360 | - all changesets that touch a directory, with diffs, excluding merges:: |
|
3361 | 3361 | |
|
3362 | 3362 | hg log -Mp lib/ |
|
3363 | 3363 | |
|
3364 | 3364 | - all revision numbers that match a keyword:: |
|
3365 | 3365 | |
|
3366 | 3366 | hg log -k bug --template "{rev}\\n" |
|
3367 | 3367 | |
|
3368 | 3368 | - the full hash identifier of the working directory parent:: |
|
3369 | 3369 | |
|
3370 | 3370 | hg log -r . --template "{node}\\n" |
|
3371 | 3371 | |
|
3372 | 3372 | - list available log templates:: |
|
3373 | 3373 | |
|
3374 | 3374 | hg log -T list |
|
3375 | 3375 | |
|
3376 | 3376 | - check if a given changeset is included in a tagged release:: |
|
3377 | 3377 | |
|
3378 | 3378 | hg log -r "a21ccf and ancestor(1.9)" |
|
3379 | 3379 | |
|
3380 | 3380 | - find all changesets by some user in a date range:: |
|
3381 | 3381 | |
|
3382 | 3382 | hg log -k alice -d "may 2008 to jul 2008" |
|
3383 | 3383 | |
|
3384 | 3384 | - summary of all changesets after the last tag:: |
|
3385 | 3385 | |
|
3386 | 3386 | hg log -r "last(tagged())::" --template "{desc|firstline}\\n" |
|
3387 | 3387 | |
|
3388 | 3388 | - changesets touching lines 13 to 23 for file.c:: |
|
3389 | 3389 | |
|
3390 | 3390 | hg log -L file.c,13:23 |
|
3391 | 3391 | |
|
3392 | 3392 | - changesets touching lines 13 to 23 for file.c and lines 2 to 6 of |
|
3393 | 3393 | main.c with patch:: |
|
3394 | 3394 | |
|
3395 | 3395 | hg log -L file.c,13:23 -L main.c,2:6 -p |
|
3396 | 3396 | |
|
3397 | 3397 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
3398 | 3398 | |
|
3399 | 3399 | See :hg:`help revisions` for more about specifying and ordering |
|
3400 | 3400 | revisions. |
|
3401 | 3401 | |
|
3402 | 3402 | See :hg:`help templates` for more about pre-packaged styles and |
|
3403 | 3403 | specifying custom templates. The default template used by the log |
|
3404 | 3404 | command can be customized via the ``ui.logtemplate`` configuration |
|
3405 | 3405 | setting. |
|
3406 | 3406 | |
|
3407 | 3407 | Returns 0 on success. |
|
3408 | 3408 | |
|
3409 | 3409 | """ |
|
3410 | 3410 | opts = pycompat.byteskwargs(opts) |
|
3411 | 3411 | linerange = opts.get('line_range') |
|
3412 | 3412 | |
|
3413 | 3413 | if linerange and not opts.get('follow'): |
|
3414 | 3414 | raise error.Abort(_('--line-range requires --follow')) |
|
3415 | 3415 | |
|
3416 | 3416 | if linerange and pats: |
|
3417 | 3417 | # TODO: take pats as patterns with no line-range filter |
|
3418 | 3418 | raise error.Abort( |
|
3419 | 3419 | _('FILE arguments are not compatible with --line-range option') |
|
3420 | 3420 | ) |
|
3421 | 3421 | |
|
3422 | 3422 | repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn') |
|
3423 | 3423 | revs, differ = logcmdutil.getrevs(repo, pats, opts) |
|
3424 | 3424 | if linerange: |
|
3425 | 3425 | # TODO: should follow file history from logcmdutil._initialrevs(), |
|
3426 | 3426 | # then filter the result by logcmdutil._makerevset() and --limit |
|
3427 | 3427 | revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts) |
|
3428 | 3428 | |
|
3429 | 3429 | getrenamed = None |
|
3430 | 3430 | if opts.get('copies'): |
|
3431 | 3431 | endrev = None |
|
3432 | 3432 | if opts.get('rev'): |
|
3433 | 3433 | endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1 |
|
3434 | 3434 | getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) |
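# getrenamedfn() is only primed when --copies was requested; capping it
# at endrev (one past the highest revision asked for) keeps rename
# detection from walking filelog history that log will never display.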
|
3435 | 3435 | |
|
3436 | 3436 | ui.pager('log') |
|
3437 | 3437 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ, |
|
3438 | 3438 | buffered=True) |
|
3439 | 3439 | if opts.get('graph'): |
|
3440 | 3440 | displayfn = logcmdutil.displaygraphrevs |
|
3441 | 3441 | else: |
|
3442 | 3442 | displayfn = logcmdutil.displayrevs |
|
3443 | 3443 | displayfn(ui, repo, revs, displayer, getrenamed) |
|
3444 | 3444 | |
|
3445 | 3445 | @command('manifest', |
|
3446 | 3446 | [('r', 'rev', '', _('revision to display'), _('REV')), |
|
3447 | 3447 | ('', 'all', False, _("list files from all revisions"))] |
|
3448 | 3448 | + formatteropts, |
|
3449 | 3449 | _('[-r REV]'), cmdtype=readonly) |
|
3450 | 3450 | def manifest(ui, repo, node=None, rev=None, **opts): |
|
3451 | 3451 | """output the current or given revision of the project manifest |
|
3452 | 3452 | |
|
3453 | 3453 | Print a list of version controlled files for the given revision. |
|
3454 | 3454 | If no revision is given, the first parent of the working directory |
|
3455 | 3455 | is used, or the null revision if no revision is checked out. |
|
3456 | 3456 | |
|
3457 | 3457 | With -v, print file permissions, symlink and executable bits. |
|
3458 | 3458 | With --debug, print file revision hashes. |
|
3459 | 3459 | |
|
3460 | 3460 | If option --all is specified, the list of all files from all revisions |
|
3461 | 3461 | is printed. This includes deleted and renamed files. |
|
3462 | 3462 | |
|
3463 | 3463 | Returns 0 on success. |
|
3464 | 3464 | """ |
|
3465 | 3465 | opts = pycompat.byteskwargs(opts) |
|
3466 | 3466 | fm = ui.formatter('manifest', opts) |
|
3467 | 3467 | |
|
3468 | 3468 | if opts.get('all'): |
|
3469 | 3469 | if rev or node: |
|
3470 | 3470 | raise error.Abort(_("can't specify a revision with --all")) |
|
3471 | 3471 | |
|
3472 | 3472 | res = [] |
|
3473 | 3473 | prefix = "data/" |
|
3474 | 3474 | suffix = ".i" |
|
3475 | 3475 | plen = len(prefix) |
|
3476 | 3476 | slen = len(suffix) |
|
3477 | 3477 | with repo.lock(): |
|
3478 | 3478 | for fn, b, size in repo.store.datafiles(): |
|
3479 | 3479 | if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix: |
|
3480 | 3480 | res.append(fn[plen:-slen]) |
|
3481 | 3481 | ui.pager('manifest') |
|
3482 | 3482 | for f in res: |
|
3483 | 3483 | fm.startitem() |
|
3484 | 3484 | fm.write("path", '%s\n', f) |
|
3485 | 3485 | fm.end() |
|
3486 | 3486 | return |
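# The --all branch above works purely on store file names: each filelog
# lives at "data/<path>.i", so slicing off the prefix and suffix recovers
# the tracked path.  A sketch with a hypothetical name:
#
#   >>> fn = 'data/mercurial/commands.py.i'
#   >>> fn[len('data/'):-len('.i')]
#   'mercurial/commands.py'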
|
3487 | 3487 | |
|
3488 | 3488 | if rev and node: |
|
3489 | 3489 | raise error.Abort(_("please specify just one revision")) |
|
3490 | 3490 | |
|
3491 | 3491 | if not node: |
|
3492 | 3492 | node = rev |
|
3493 | 3493 | |
|
3494 | 3494 | char = {'l': '@', 'x': '*', '': '', 't': 'd'} |
|
3495 | 3495 | mode = {'l': '644', 'x': '755', '': '644', 't': '755'} |
|
3496 | 3496 | if node: |
|
3497 | 3497 | repo = scmutil.unhidehashlikerevs(repo, [node], 'nowarn') |
|
3498 | 3498 | ctx = scmutil.revsingle(repo, node) |
|
3499 | 3499 | mf = ctx.manifest() |
|
3500 | 3500 | ui.pager('manifest') |
|
3501 | 3501 | for f in ctx: |
|
3502 | 3502 | fm.startitem() |
|
3503 | 3503 | fl = ctx[f].flags() |
|
3504 | 3504 | fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f])) |
|
3505 | 3505 | fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl]) |
|
3506 | 3506 | fm.write('path', '%s\n', f) |
|
3507 | 3507 | fm.end() |
|
3508 | 3508 | |
|
3509 | 3509 | @command('^merge', |
|
3510 | 3510 | [('f', 'force', None, |
|
3511 | 3511 | _('force a merge including outstanding changes (DEPRECATED)')), |
|
3512 | 3512 | ('r', 'rev', '', _('revision to merge'), _('REV')), |
|
3513 | 3513 | ('P', 'preview', None, |
|
3514 | 3514 | _('review revisions to merge (no merge is performed)')), |
|
3515 | 3515 | ('', 'abort', None, _('abort the ongoing merge')), |
|
3516 | 3516 | ] + mergetoolopts, |
|
3517 | 3517 | _('[-P] [[-r] REV]')) |
|
3518 | 3518 | def merge(ui, repo, node=None, **opts): |
|
3519 | 3519 | """merge another revision into working directory |
|
3520 | 3520 | |
|
3521 | 3521 | The current working directory is updated with all changes made in |
|
3522 | 3522 | the requested revision since the last common predecessor revision. |
|
3523 | 3523 | |
|
3524 | 3524 | Files that changed relative to either parent are marked as changed for
|
3525 | 3525 | the next commit and a commit must be performed before any further |
|
3526 | 3526 | updates to the repository are allowed. The next commit will have |
|
3527 | 3527 | two parents. |
|
3528 | 3528 | |
|
3529 | 3529 | ``--tool`` can be used to specify the merge tool used for file |
|
3530 | 3530 | merges. It overrides the HGMERGE environment variable and your |
|
3531 | 3531 | configuration files. See :hg:`help merge-tools` for options. |
|
3532 | 3532 | |
|
3533 | 3533 | If no revision is specified, the working directory's parent is a |
|
3534 | 3534 | head revision, and the current branch contains exactly one other |
|
3535 | 3535 | head, the other head is merged by default. Otherwise, an
|
3536 | 3536 | explicit revision with which to merge must be provided.
|
3537 | 3537 | |
|
3538 | 3538 | See :hg:`help resolve` for information on handling file conflicts. |
|
3539 | 3539 | |
|
3540 | 3540 | To undo an uncommitted merge, use :hg:`merge --abort` which |
|
3541 | 3541 | will check out a clean copy of the original merge parent, losing |
|
3542 | 3542 | all changes. |
|
3543 | 3543 | |
|
3544 | 3544 | Returns 0 on success, 1 if there are unresolved files. |
|
3545 | 3545 | """ |
|
3546 | 3546 | |
|
3547 | 3547 | opts = pycompat.byteskwargs(opts) |
|
3548 | 3548 | abort = opts.get('abort') |
|
3549 | 3549 | if abort and repo.dirstate.p2() == nullid: |
|
3550 | 3550 | cmdutil.wrongtooltocontinue(repo, _('merge')) |
|
3551 | 3551 | if abort: |
|
3552 | 3552 | if node: |
|
3553 | 3553 | raise error.Abort(_("cannot specify a node with --abort")) |
|
3554 | 3554 | if opts.get('rev'): |
|
3555 | 3555 | raise error.Abort(_("cannot specify both --rev and --abort")) |
|
3556 | 3556 | if opts.get('preview'): |
|
3557 | 3557 | raise error.Abort(_("cannot specify --preview with --abort")) |
|
3558 | 3558 | if opts.get('rev') and node: |
|
3559 | 3559 | raise error.Abort(_("please specify just one revision")) |
|
3560 | 3560 | if not node: |
|
3561 | 3561 | node = opts.get('rev') |
|
3562 | 3562 | |
|
3563 | 3563 | if node: |
|
3564 | 3564 | node = scmutil.revsingle(repo, node).node() |
|
3565 | 3565 | |
|
3566 | 3566 | if not node and not abort: |
|
3567 | 3567 | node = repo[destutil.destmerge(repo)].node() |
|
3568 | 3568 | |
|
3569 | 3569 | if opts.get('preview'): |
|
3570 | 3570 | # find nodes that are ancestors of p2 but not of p1 |
|
3571 | 3571 | p1 = repo.lookup('.') |
|
3572 | 3572 | p2 = repo.lookup(node) |
|
3573 | 3573 | nodes = repo.changelog.findmissing(common=[p1], heads=[p2]) |
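# findmissing(common=[p1], heads=[p2]) is the DAG-level equivalent of
# the revset 'only(REV, .)': ancestors of the merge target that are not
# ancestors of the working directory parent, i.e. the changesets the
# merge would bring in.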
|
3574 | 3574 | |
|
3575 | 3575 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
3576 | 3576 | for node in nodes: |
|
3577 | 3577 | displayer.show(repo[node]) |
|
3578 | 3578 | displayer.close() |
|
3579 | 3579 | return 0 |
|
3580 | 3580 | |
|
3581 | 3581 | try: |
|
3582 | 3582 | # ui.forcemerge is an internal variable, do not document |
|
3583 | 3583 | repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'merge') |
|
3584 | 3584 | force = opts.get('force') |
|
3585 | 3585 | labels = ['working copy', 'merge rev'] |
|
3586 | 3586 | return hg.merge(repo, node, force=force, mergeforce=force, |
|
3587 | 3587 | labels=labels, abort=abort) |
|
3588 | 3588 | finally: |
|
3589 | 3589 | ui.setconfig('ui', 'forcemerge', '', 'merge') |
|
3590 | 3590 | |
|
3591 | 3591 | @command('outgoing|out', |
|
3592 | 3592 | [('f', 'force', None, _('run even when the destination is unrelated')), |
|
3593 | 3593 | ('r', 'rev', [], |
|
3594 | 3594 | _('a changeset intended to be included in the destination'), _('REV')), |
|
3595 | 3595 | ('n', 'newest-first', None, _('show newest record first')), |
|
3596 | 3596 | ('B', 'bookmarks', False, _('compare bookmarks')), |
|
3597 | 3597 | ('b', 'branch', [], _('a specific branch you would like to push'), |
|
3598 | 3598 | _('BRANCH')), |
|
3599 | 3599 | ] + logopts + remoteopts + subrepoopts, |
|
3600 | 3600 | _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')) |
|
3601 | 3601 | def outgoing(ui, repo, dest=None, **opts): |
|
3602 | 3602 | """show changesets not found in the destination |
|
3603 | 3603 | |
|
3604 | 3604 | Show changesets not found in the specified destination repository |
|
3605 | 3605 | or the default push location. These are the changesets that would |
|
3606 | 3606 | be pushed if a push was requested. |
|
3607 | 3607 | |
|
3608 | 3608 | See pull for details of valid destination formats. |
|
3609 | 3609 | |
|
3610 | 3610 | .. container:: verbose |
|
3611 | 3611 | |
|
3612 | 3612 | With -B/--bookmarks, the result of bookmark comparison between |
|
3613 | 3613 | local and remote repositories is displayed. With -v/--verbose, |
|
3614 | 3614 | status is also displayed for each bookmark like below:: |
|
3615 | 3615 | |
|
3616 | 3616 | BM1 01234567890a added |
|
3617 | 3617 | BM2 deleted |
|
3618 | 3618 | BM3 234567890abc advanced |
|
3619 | 3619 | BM4 34567890abcd diverged |
|
3620 | 3620 | BM5 4567890abcde changed |
|
3621 | 3621 | |
|
3622 | 3622 | The action taken when pushing depends on the |
|
3623 | 3623 | status of each bookmark: |
|
3624 | 3624 | |
|
3625 | 3625 | :``added``: push with ``-B`` will create it |
|
3626 | 3626 | :``deleted``: push with ``-B`` will delete it |
|
3627 | 3627 | :``advanced``: push will update it |
|
3628 | 3628 | :``diverged``: push with ``-B`` will update it |
|
3629 | 3629 | :``changed``: push with ``-B`` will update it |
|
3630 | 3630 | |
|
3631 | 3631 | From the point of view of pushing behavior, bookmarks |
|
3632 | 3632 | existing only in the remote repository are treated as |
|
3633 | 3633 | ``deleted``, even if they are in fact added remotely.
|
3634 | 3634 | |
|
3635 | 3635 | Returns 0 if there are outgoing changes, 1 otherwise. |
|
3636 | 3636 | """ |
|
3637 | 3637 | opts = pycompat.byteskwargs(opts) |
|
3638 | 3638 | if opts.get('graph'): |
|
3639 | 3639 | logcmdutil.checkunsupportedgraphflags([], opts) |
|
3640 | 3640 | o, other = hg._outgoing(ui, repo, dest, opts) |
|
3641 | 3641 | if not o: |
|
3642 | 3642 | cmdutil.outgoinghooks(ui, repo, other, opts, o) |
|
3643 | 3643 | return |
|
3644 | 3644 | |
|
3645 | 3645 | revdag = logcmdutil.graphrevs(repo, o, opts) |
|
3646 | 3646 | ui.pager('outgoing') |
|
3647 | 3647 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts, buffered=True) |
|
3648 | 3648 | logcmdutil.displaygraph(ui, repo, revdag, displayer, |
|
3649 | 3649 | graphmod.asciiedges) |
|
3650 | 3650 | cmdutil.outgoinghooks(ui, repo, other, opts, o) |
|
3651 | 3651 | return 0 |
|
3652 | 3652 | |
|
3653 | 3653 | if opts.get('bookmarks'): |
|
3654 | 3654 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
3655 | 3655 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
3656 | 3656 | other = hg.peer(repo, opts, dest) |
|
3657 | 3657 | if 'bookmarks' not in other.listkeys('namespaces'): |
|
3658 | 3658 | ui.warn(_("remote doesn't support bookmarks\n")) |
|
3659 | 3659 | return 0 |
|
3660 | 3660 | ui.status(_('comparing with %s\n') % util.hidepassword(dest)) |
|
3661 | 3661 | ui.pager('outgoing') |
|
3662 | 3662 | return bookmarks.outgoing(ui, repo, other) |
|
3663 | 3663 | |
|
3664 | 3664 | repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default') |
|
3665 | 3665 | try: |
|
3666 | 3666 | return hg.outgoing(ui, repo, dest, opts) |
|
3667 | 3667 | finally: |
|
3668 | 3668 | del repo._subtoppath |
|
3669 | 3669 | |
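The bookmark states listed in the docstring above can be derived from two name-to-node mappings. A self-contained sketch of that comparison (illustrative only; the real logic, including the ancestry checks that distinguish ``advanced`` from ``diverged``, lives in ``bookmarks.outgoing``)::

    def comparebookmarks(local, remote):
        # local/remote: dicts mapping bookmark name -> node
        states = {}
        for name in set(local) | set(remote):
            if name not in remote:
                states[name] = 'added'      # push -B would create it
            elif name not in local:
                states[name] = 'deleted'    # push -B would delete it
            elif local[name] == remote[name]:
                states[name] = 'unchanged'
            else:
                # telling 'advanced' from 'diverged' needs ancestry
                # information, which this sketch omits
                states[name] = 'changed'
        return states
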
|
3670 | 3670 | @command('parents', |
|
3671 | 3671 | [('r', 'rev', '', _('show parents of the specified revision'), _('REV')), |
|
3672 | 3672 | ] + templateopts, |
|
3673 | 3673 | _('[-r REV] [FILE]'), |
|
3674 | 3674 | inferrepo=True) |
|
3675 | 3675 | def parents(ui, repo, file_=None, **opts): |
|
3676 | 3676 | """show the parents of the working directory or revision (DEPRECATED) |
|
3677 | 3677 | |
|
3678 | 3678 | Print the working directory's parent revisions. If a revision is |
|
3679 | 3679 | given via -r/--rev, the parent of that revision will be printed. |
|
3680 | 3680 | If a file argument is given, the revision in which the file was |
|
3681 | 3681 | last changed (before the working directory revision or the |
|
3682 | 3682 | argument to --rev if given) is printed. |
|
3683 | 3683 | |
|
3684 | 3684 | This command is equivalent to:: |
|
3685 | 3685 | |
|
3686 | 3686 | hg log -r "p1()+p2()" or |
|
3687 | 3687 | hg log -r "p1(REV)+p2(REV)" or |
|
3688 | 3688 | hg log -r "max(::p1() and file(FILE))+max(::p2() and file(FILE))" or |
|
3689 | 3689 | hg log -r "max(::p1(REV) and file(FILE))+max(::p2(REV) and file(FILE))" |
|
3690 | 3690 | |
|
3691 | 3691 | See :hg:`summary` and :hg:`help revsets` for related information. |
|
3692 | 3692 | |
|
3693 | 3693 | Returns 0 on success. |
|
3694 | 3694 | """ |
|
3695 | 3695 | |
|
3696 | 3696 | opts = pycompat.byteskwargs(opts) |
|
3697 | 3697 | rev = opts.get('rev') |
|
3698 | 3698 | if rev: |
|
3699 | 3699 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
3700 | 3700 | ctx = scmutil.revsingle(repo, rev, None) |
|
3701 | 3701 | |
|
3702 | 3702 | if file_: |
|
3703 | 3703 | m = scmutil.match(ctx, (file_,), opts) |
|
3704 | 3704 | if m.anypats() or len(m.files()) != 1: |
|
3705 | 3705 | raise error.Abort(_('can only specify an explicit filename')) |
|
3706 | 3706 | file_ = m.files()[0] |
|
3707 | 3707 | filenodes = [] |
|
3708 | 3708 | for cp in ctx.parents(): |
|
3709 | 3709 | if not cp: |
|
3710 | 3710 | continue |
|
3711 | 3711 | try: |
|
3712 | 3712 | filenodes.append(cp.filenode(file_)) |
|
3713 | 3713 | except error.LookupError: |
|
3714 | 3714 | pass |
|
3715 | 3715 | if not filenodes: |
|
3716 | 3716 | raise error.Abort(_("'%s' not found in manifest!") % file_) |
|
3717 | 3717 | p = [] |
|
3718 | 3718 | for fn in filenodes: |
|
3719 | 3719 | fctx = repo.filectx(file_, fileid=fn) |
|
3720 | 3720 | p.append(fctx.node()) |
|
3721 | 3721 | else: |
|
3722 | 3722 | p = [cp.node() for cp in ctx.parents()] |
|
3723 | 3723 | |
|
3724 | 3724 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
3725 | 3725 | for n in p: |
|
3726 | 3726 | if n != nullid: |
|
3727 | 3727 | displayer.show(repo[n]) |
|
3728 | 3728 | displayer.close() |
|
3729 | 3729 | |
|
3730 | 3730 | @command('paths', formatteropts, _('[NAME]'), optionalrepo=True, |
|
3731 | 3731 | cmdtype=readonly) |
|
3732 | 3732 | def paths(ui, repo, search=None, **opts): |
|
3733 | 3733 | """show aliases for remote repositories |
|
3734 | 3734 | |
|
3735 | 3735 | Show definition of symbolic path name NAME. If no name is given, |
|
3736 | 3736 | show definition of all available names. |
|
3737 | 3737 | |
|
3738 | 3738 | Option -q/--quiet suppresses all output when searching for NAME |
|
3739 | 3739 | and shows only the path names when listing all definitions. |
|
3740 | 3740 | |
|
3741 | 3741 | Path names are defined in the [paths] section of your |
|
3742 | 3742 | configuration file and in ``/etc/mercurial/hgrc``. If run inside a |
|
3743 | 3743 | repository, ``.hg/hgrc`` is used, too. |
|
3744 | 3744 | |
|
3745 | 3745 | The path names ``default`` and ``default-push`` have a special |
|
3746 | 3746 | meaning. When performing a push or pull operation, they are used |
|
3747 | 3747 | as fallbacks if no location is specified on the command-line. |
|
3748 | 3748 | When ``default-push`` is set, it will be used for push and |
|
3749 | 3749 | ``default`` will be used for pull; otherwise ``default`` is used |
|
3750 | 3750 | as the fallback for both. When cloning a repository, the clone |
|
3751 | 3751 | source is written as ``default`` in ``.hg/hgrc``. |
|
3752 | 3752 | |
|
3753 | 3753 | .. note:: |
|
3754 | 3754 | |
|
3755 | 3755 | ``default`` and ``default-push`` apply to all inbound (e.g. |
|
3756 | 3756 | :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` |
|
3757 | 3757 | and :hg:`bundle`) operations. |
|
3758 | 3758 | |
|
3759 | 3759 | See :hg:`help urls` for more information. |
|
3760 | 3760 | |
|
3761 | 3761 | Returns 0 on success. |
|
3762 | 3762 | """ |
|
3763 | 3763 | |
|
3764 | 3764 | opts = pycompat.byteskwargs(opts) |
|
3765 | 3765 | ui.pager('paths') |
|
3766 | 3766 | if search: |
|
3767 | 3767 | pathitems = [(name, path) for name, path in ui.paths.iteritems() |
|
3768 | 3768 | if name == search] |
|
3769 | 3769 | else: |
|
3770 | 3770 | pathitems = sorted(ui.paths.iteritems()) |
|
3771 | 3771 | |
|
3772 | 3772 | fm = ui.formatter('paths', opts) |
|
3773 | 3773 | if fm.isplain(): |
|
3774 | 3774 | hidepassword = util.hidepassword |
|
3775 | 3775 | else: |
|
3776 | 3776 | hidepassword = str |
|
3777 | 3777 | if ui.quiet: |
|
3778 | 3778 | namefmt = '%s\n' |
|
3779 | 3779 | else: |
|
3780 | 3780 | namefmt = '%s = ' |
|
3781 | 3781 | showsubopts = not search and not ui.quiet |
|
3782 | 3782 | |
|
3783 | 3783 | for name, path in pathitems: |
|
3784 | 3784 | fm.startitem() |
|
3785 | 3785 | fm.condwrite(not search, 'name', namefmt, name) |
|
3786 | 3786 | fm.condwrite(not ui.quiet, 'url', '%s\n', hidepassword(path.rawloc)) |
|
3787 | 3787 | for subopt, value in sorted(path.suboptions.items()): |
|
3788 | 3788 | assert subopt not in ('name', 'url') |
|
3789 | 3789 | if showsubopts: |
|
3790 | 3790 | fm.plain('%s:%s = ' % (name, subopt)) |
|
3791 | 3791 | fm.condwrite(showsubopts, subopt, '%s\n', value) |
|
3792 | 3792 | |
|
3793 | 3793 | fm.end() |
|
3794 | 3794 | |
|
3795 | 3795 | if search and not pathitems: |
|
3796 | 3796 | if not ui.quiet: |
|
3797 | 3797 | ui.warn(_("not found!\n")) |
|
3798 | 3798 | return 1 |
|
3799 | 3799 | else: |
|
3800 | 3800 | return 0 |
|
3801 | 3801 | |
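The ``default``/``default-push`` fallback described in the docstring reduces to a simple lookup order. A sketch assuming plain dict semantics (the real resolution is ``ui.paths.getpath``, which also handles suboptions)::

    def resolvepath(paths, dest=None, pushing=False):
        # an explicit location on the command line always wins
        if dest:
            return dest
        if pushing and 'default-push' in paths:
            return paths['default-push']
        return paths.get('default')
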
|
3802 | 3802 | @command('phase', |
|
3803 | 3803 | [('p', 'public', False, _('set changeset phase to public')), |
|
3804 | 3804 | ('d', 'draft', False, _('set changeset phase to draft')), |
|
3805 | 3805 | ('s', 'secret', False, _('set changeset phase to secret')), |
|
3806 | 3806 | ('f', 'force', False, _('allow moving the boundary backward')),
|
3807 | 3807 | ('r', 'rev', [], _('target revision'), _('REV')), |
|
3808 | 3808 | ], |
|
3809 | 3809 | _('[-p|-d|-s] [-f] [-r] [REV...]')) |
|
3810 | 3810 | def phase(ui, repo, *revs, **opts): |
|
3811 | 3811 | """set or show the current phase name |
|
3812 | 3812 | |
|
3813 | 3813 | With no argument, show the phase name of the current revision(s). |
|
3814 | 3814 | |
|
3815 | 3815 | With one of -p/--public, -d/--draft or -s/--secret, change the |
|
3816 | 3816 | phase value of the specified revisions. |
|
3817 | 3817 | |
|
3818 | 3818 | Unless -f/--force is specified, :hg:`phase` won't move changesets from a |
|
3819 | 3819 | lower phase to a higher phase. Phases are ordered as follows:: |
|
3820 | 3820 | |
|
3821 | 3821 | public < draft < secret |
|
3822 | 3822 | |
|
3823 | 3823 | Returns 0 on success, 1 if some phases could not be changed. |
|
3824 | 3824 | |
|
3825 | 3825 | (For more information about the phases concept, see :hg:`help phases`.) |
|
3826 | 3826 | """ |
|
3827 | 3827 | opts = pycompat.byteskwargs(opts) |
|
3828 | 3828 | # search for a unique phase argument |
|
3829 | 3829 | targetphase = None |
|
3830 | 3830 | for idx, name in enumerate(phases.phasenames): |
|
3831 | 3831 | if opts[name]: |
|
3832 | 3832 | if targetphase is not None: |
|
3833 | 3833 | raise error.Abort(_('only one phase can be specified')) |
|
3834 | 3834 | targetphase = idx |
|
3835 | 3835 | |
|
3836 | 3836 | # look for specified revision |
|
3837 | 3837 | revs = list(revs) |
|
3838 | 3838 | revs.extend(opts['rev']) |
|
3839 | 3839 | if not revs: |
|
3840 | 3840 | # display both parents as the second parent phase can influence |
|
3841 | 3841 | # the phase of a merge commit |
|
3842 | 3842 | revs = [c.rev() for c in repo[None].parents()] |
|
3843 | 3843 | |
|
3844 | 3844 | revs = scmutil.revrange(repo, revs) |
|
3845 | 3845 | |
|
3846 | 3846 | ret = 0 |
|
3847 | 3847 | if targetphase is None: |
|
3848 | 3848 | # display |
|
3849 | 3849 | for r in revs: |
|
3850 | 3850 | ctx = repo[r] |
|
3851 | 3851 | ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr())) |
|
3852 | 3852 | else: |
|
3853 | 3853 | with repo.lock(), repo.transaction("phase") as tr: |
|
3854 | 3854 | # set phase |
|
3855 | 3855 | if not revs: |
|
3856 | 3856 | raise error.Abort(_('empty revision set')) |
|
3857 | 3857 | nodes = [repo[r].node() for r in revs] |
|
3858 | 3858 | # moving revisions from public to draft may hide them

3859 | 3859 | # we have to check the result on an unfiltered repository
|
3860 | 3860 | unfi = repo.unfiltered() |
|
3861 | 3861 | getphase = unfi._phasecache.phase |
|
3862 | 3862 | olddata = [getphase(unfi, r) for r in unfi] |
|
3863 | 3863 | phases.advanceboundary(repo, tr, targetphase, nodes) |
|
3864 | 3864 | if opts['force']: |
|
3865 | 3865 | phases.retractboundary(repo, tr, targetphase, nodes) |
|
3866 | 3866 | getphase = unfi._phasecache.phase |
|
3867 | 3867 | newdata = [getphase(unfi, r) for r in unfi] |
|
3868 | 3868 | changes = sum(newdata[r] != olddata[r] for r in unfi) |
|
3869 | 3869 | cl = unfi.changelog |
|
3870 | 3870 | rejected = [n for n in nodes |
|
3871 | 3871 | if newdata[cl.rev(n)] < targetphase] |
|
3872 | 3872 | if rejected: |
|
3873 | 3873 | ui.warn(_('cannot move %i changesets to a higher ' |
|
3874 | 3874 | 'phase, use --force\n') % len(rejected)) |
|
3875 | 3875 | ret = 1 |
|
3876 | 3876 | if changes: |
|
3877 | 3877 | msg = _('phase changed for %i changesets\n') % changes |
|
3878 | 3878 | if ret: |
|
3879 | 3879 | ui.status(msg) |
|
3880 | 3880 | else: |
|
3881 | 3881 | ui.note(msg) |
|
3882 | 3882 | else: |
|
3883 | 3883 | ui.warn(_('no phases changed\n')) |
|
3884 | 3884 | return ret |
|
3885 | 3885 | |
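The ``--force`` check above follows directly from the ordering ``public < draft < secret``: ``phases.advanceboundary`` only moves changesets toward ``public``, so any move toward ``secret`` retracts the boundary and is rejected without ``--force``. A sketch, using index positions as in ``phases.phasenames``::

    PHASENAMES = ['public', 'draft', 'secret']   # mirrors phases.phasenames ordering

    def needsforce(currentphase, targetphase):
        # indices grow toward 'secret'; raising the index retracts the
        # boundary and therefore requires --force
        return targetphase > currentphase
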
|
3886 | 3886 | def postincoming(ui, repo, modheads, optupdate, checkout, brev): |
|
3887 | 3887 | """Run after a changegroup has been added via pull/unbundle |
|
3888 | 3888 | |
|
3889 | 3889 | This takes the arguments below:
|
3890 | 3890 | |
|
3891 | 3891 | :modheads: change of heads by pull/unbundle |
|
3892 | 3892 | :optupdate: whether the working directory should be updated
|
3893 | 3893 | :checkout: update destination revision (or None to default destination) |
|
3894 | 3894 | :brev: a name, which might be a bookmark to be activated after updating |
|
3895 | 3895 | """ |
|
3896 | 3896 | if modheads == 0: |
|
3897 | 3897 | return |
|
3898 | 3898 | if optupdate: |
|
3899 | 3899 | try: |
|
3900 | 3900 | return hg.updatetotally(ui, repo, checkout, brev) |
|
3901 | 3901 | except error.UpdateAbort as inst: |
|
3902 | msg = _("not updating: %s") % str(inst) | |
|
3902 | msg = _("not updating: %s") % util.forcebytestr(inst) | |
|
3903 | 3903 | hint = inst.hint |
|
3904 | 3904 | raise error.UpdateAbort(msg, hint=hint) |
|
3905 | 3905 | if modheads > 1: |
|
3906 | 3906 | currentbranchheads = len(repo.branchheads()) |
|
3907 | 3907 | if currentbranchheads == modheads: |
|
3908 | 3908 | ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n")) |
|
3909 | 3909 | elif currentbranchheads > 1: |
|
3910 | 3910 | ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to " |
|
3911 | 3911 | "merge)\n")) |
|
3912 | 3912 | else: |
|
3913 | 3913 | ui.status(_("(run 'hg heads' to see heads)\n")) |
|
3914 | 3914 | elif not ui.configbool('commands', 'update.requiredest'): |
|
3915 | 3915 | ui.status(_("(run 'hg update' to get a working copy)\n")) |
|
3916 | 3916 | |
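Two details worth noting in ``postincoming``: the one-line change in this hunk replaces ``str(inst)`` with ``util.forcebytestr(inst)``, keeping the interpolated abort message a byte string on Python 3; and ``modheads`` drives which hint the user sees after the pull. A sketch of that hint selection, restating only the branches visible above (simplified: it ignores the ``commands.update.requiredest`` check)::

    def headshint(modheads, currentbranchheads):
        # mirrors the ui.status branches in postincoming() above
        if modheads <= 1:
            return "(run 'hg update' to get a working copy)"
        if currentbranchheads == modheads:
            return "(run 'hg heads' to see heads, 'hg merge' to merge)"
        if currentbranchheads > 1:
            return "(run 'hg heads .' to see heads, 'hg merge' to merge)"
        return "(run 'hg heads' to see heads)"
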
|
3917 | 3917 | @command('^pull', |
|
3918 | 3918 | [('u', 'update', None, |
|
3919 | 3919 | _('update to new branch head if new descendants were pulled')), |
|
3920 | 3920 | ('f', 'force', None, _('run even when remote repository is unrelated')), |
|
3921 | 3921 | ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')), |
|
3922 | 3922 | ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')), |
|
3923 | 3923 | ('b', 'branch', [], _('a specific branch you would like to pull'), |
|
3924 | 3924 | _('BRANCH')), |
|
3925 | 3925 | ] + remoteopts, |
|
3926 | 3926 | _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')) |
|
3927 | 3927 | def pull(ui, repo, source="default", **opts): |
|
3928 | 3928 | """pull changes from the specified source |
|
3929 | 3929 | |
|
3930 | 3930 | Pull changes from a remote repository to a local one. |
|
3931 | 3931 | |
|
3932 | 3932 | This finds all changes from the repository at the specified path |
|
3933 | 3933 | or URL and adds them to a local repository (the current one unless |
|
3934 | 3934 | -R is specified). By default, this does not update the copy of the |
|
3935 | 3935 | project in the working directory. |
|
3936 | 3936 | |
|
3937 | 3937 | Use :hg:`incoming` if you want to see what would have been added |
|
3938 | 3938 | by a pull at the time you issued this command. If you then decide |
|
3939 | 3939 | to add those changes to the repository, you should use :hg:`pull |
|
3940 | 3940 | -r X` where ``X`` is the last changeset listed by :hg:`incoming`. |
|
3941 | 3941 | |
|
3942 | 3942 | If SOURCE is omitted, the 'default' path will be used. |
|
3943 | 3943 | See :hg:`help urls` for more information. |
|
3944 | 3944 | |
|
3945 | 3945 | Specifying bookmark as ``.`` is equivalent to specifying the active |
|
3946 | 3946 | bookmark's name. |
|
3947 | 3947 | |
|
3948 | 3948 | Returns 0 on success, 1 if an update had unresolved files. |
|
3949 | 3949 | """ |
|
3950 | 3950 | |
|
3951 | 3951 | opts = pycompat.byteskwargs(opts) |
|
3952 | 3952 | if ui.configbool('commands', 'update.requiredest') and opts.get('update'): |
|
3953 | 3953 | msg = _('update destination required by configuration') |
|
3954 | 3954 | hint = _('use hg pull followed by hg update DEST') |
|
3955 | 3955 | raise error.Abort(msg, hint=hint) |
|
3956 | 3956 | |
|
3957 | 3957 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) |
|
3958 | 3958 | ui.status(_('pulling from %s\n') % util.hidepassword(source)) |
|
3959 | 3959 | other = hg.peer(repo, opts, source) |
|
3960 | 3960 | try: |
|
3961 | 3961 | revs, checkout = hg.addbranchrevs(repo, other, branches, |
|
3962 | 3962 | opts.get('rev')) |
|
3963 | 3963 | |
|
3964 | 3964 | |
|
3965 | 3965 | pullopargs = {} |
|
3966 | 3966 | if opts.get('bookmark'): |
|
3967 | 3967 | if not revs: |
|
3968 | 3968 | revs = [] |
|
3970 | 3970 | # The list of bookmarks used here is not the one used to actually
|
3970 | 3970 | # update the bookmark name. This can result in the revision pulled |
|
3971 | 3971 | # not ending up with the name of the bookmark because of a race |
|
3972 | 3972 | # condition on the server. (See issue 4689 for details) |
|
3973 | 3973 | remotebookmarks = other.listkeys('bookmarks') |
|
3974 | 3974 | remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks) |
|
3975 | 3975 | pullopargs['remotebookmarks'] = remotebookmarks |
|
3976 | 3976 | for b in opts['bookmark']: |
|
3977 | 3977 | b = repo._bookmarks.expandname(b) |
|
3978 | 3978 | if b not in remotebookmarks: |
|
3979 | 3979 | raise error.Abort(_('remote bookmark %s not found!') % b) |
|
3980 | 3980 | revs.append(hex(remotebookmarks[b])) |
|
3981 | 3981 | |
|
3982 | 3982 | if revs: |
|
3983 | 3983 | try: |
|
3984 | 3984 | # When 'rev' is a bookmark name, we cannot guarantee that it |
|
3985 | 3985 | # will be updated with that name because of a race condition |
|
3986 | 3986 | # server side. (See issue 4689 for details) |
|
3987 | 3987 | oldrevs = revs |
|
3988 | 3988 | revs = [] # actually, nodes |
|
3989 | 3989 | for r in oldrevs: |
|
3990 | 3990 | node = other.lookup(r) |
|
3991 | 3991 | revs.append(node) |
|
3992 | 3992 | if r == checkout: |
|
3993 | 3993 | checkout = node |
|
3994 | 3994 | except error.CapabilityError: |
|
3995 | 3995 | err = _("other repository doesn't support revision lookup, " |
|
3996 | 3996 | "so a rev cannot be specified.") |
|
3997 | 3997 | raise error.Abort(err) |
|
3998 | 3998 | |
|
3999 | 3999 | wlock = util.nullcontextmanager() |
|
4000 | 4000 | if opts.get('update'): |
|
4001 | 4001 | wlock = repo.wlock() |
|
4002 | 4002 | with wlock: |
|
4003 | 4003 | pullopargs.update(opts.get('opargs', {})) |
|
4004 | 4004 | modheads = exchange.pull(repo, other, heads=revs, |
|
4005 | 4005 | force=opts.get('force'), |
|
4006 | 4006 | bookmarks=opts.get('bookmark', ()), |
|
4007 | 4007 | opargs=pullopargs).cgresult |
|
4008 | 4008 | |
|
4009 | 4009 | # brev is a name, which might be a bookmark to be activated at |
|
4010 | 4010 | # the end of the update. In other words, it is an explicit |
|
4011 | 4011 | # destination of the update |
|
4012 | 4012 | brev = None |
|
4013 | 4013 | |
|
4014 | 4014 | if checkout: |
|
4015 | 4015 | checkout = "%d" % repo.changelog.rev(checkout) |
|
4016 | 4016 | |
|
4017 | 4017 | # order below depends on implementation of |
|
4018 | 4018 | # hg.addbranchrevs(). opts['bookmark'] is ignored, |
|
4019 | 4019 | # because 'checkout' is determined without it. |
|
4020 | 4020 | if opts.get('rev'): |
|
4021 | 4021 | brev = opts['rev'][0] |
|
4022 | 4022 | elif opts.get('branch'): |
|
4023 | 4023 | brev = opts['branch'][0] |
|
4024 | 4024 | else: |
|
4025 | 4025 | brev = branches[0] |
|
4026 | 4026 | repo._subtoppath = source |
|
4027 | 4027 | try: |
|
4028 | 4028 | ret = postincoming(ui, repo, modheads, opts.get('update'), |
|
4029 | 4029 | checkout, brev) |
|
4030 | 4030 | |
|
4031 | 4031 | finally: |
|
4032 | 4032 | del repo._subtoppath |
|
4033 | 4033 | |
|
4034 | 4034 | finally: |
|
4035 | 4035 | other.close() |
|
4036 | 4036 | return ret |
|
4037 | 4037 | |
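The comments above mention issue 4689 twice: a bookmark may move on the server between the moment its name is resolved and the moment the pull completes. The code therefore pins each requested bookmark to a node before pulling. A condensed sketch of that pinning step (the helper name is illustrative)::

    def pinbookmarks(remotebookmarks, requested):
        # resolve names to nodes *before* pulling, so a concurrent move
        # on the server cannot change which revisions are fetched
        nodes = []
        for name in requested:
            if name not in remotebookmarks:
                raise LookupError('remote bookmark %s not found!' % name)
            nodes.append(remotebookmarks[name])
        return nodes
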
|
4038 | 4038 | @command('^push', |
|
4039 | 4039 | [('f', 'force', None, _('force push')), |
|
4040 | 4040 | ('r', 'rev', [], |
|
4041 | 4041 | _('a changeset intended to be included in the destination'), |
|
4042 | 4042 | _('REV')), |
|
4043 | 4043 | ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')), |
|
4044 | 4044 | ('b', 'branch', [], |
|
4045 | 4045 | _('a specific branch you would like to push'), _('BRANCH')), |
|
4046 | 4046 | ('', 'new-branch', False, _('allow pushing a new branch')), |
|
4047 | 4047 | ('', 'pushvars', [], _('variables that can be sent to server (ADVANCED)')), |
|
4048 | 4048 | ] + remoteopts, |
|
4049 | 4049 | _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')) |
|
4050 | 4050 | def push(ui, repo, dest=None, **opts): |
|
4051 | 4051 | """push changes to the specified destination |
|
4052 | 4052 | |
|
4053 | 4053 | Push changesets from the local repository to the specified |
|
4054 | 4054 | destination. |
|
4055 | 4055 | |
|
4056 | 4056 | This operation is symmetrical to pull: it is identical to a pull |
|
4057 | 4057 | in the destination repository from the current one. |
|
4058 | 4058 | |
|
4059 | 4059 | By default, push will not allow creation of new heads at the |
|
4060 | 4060 | destination, since multiple heads would make it unclear which head |
|
4061 | 4061 | to use. In this situation, it is recommended to pull and merge |
|
4062 | 4062 | before pushing. |
|
4063 | 4063 | |
|
4064 | 4064 | Use --new-branch if you want to allow push to create a new named |
|
4065 | 4065 | branch that is not present at the destination. This allows you to |
|
4066 | 4066 | only create a new branch without forcing other changes. |
|
4067 | 4067 | |
|
4068 | 4068 | .. note:: |
|
4069 | 4069 | |
|
4070 | 4070 | Extra care should be taken with the -f/--force option, |
|
4071 | 4071 | which will push all new heads on all branches, an action which will |
|
4072 | 4072 | almost always cause confusion for collaborators. |
|
4073 | 4073 | |
|
4074 | 4074 | If -r/--rev is used, the specified revision and all its ancestors |
|
4075 | 4075 | will be pushed to the remote repository. |
|
4076 | 4076 | |
|
4077 | 4077 | If -B/--bookmark is used, the specified bookmarked revision, its |
|
4078 | 4078 | ancestors, and the bookmark will be pushed to the remote |
|
4079 | 4079 | repository. Specifying ``.`` is equivalent to specifying the active |
|
4080 | 4080 | bookmark's name. |
|
4081 | 4081 | |
|
4082 | 4082 | Please see :hg:`help urls` for important details about ``ssh://`` |
|
4083 | 4083 | URLs. If DESTINATION is omitted, a default path will be used. |
|
4084 | 4084 | |
|
4085 | 4085 | .. container:: verbose |
|
4086 | 4086 | |
|
4087 | 4087 | The --pushvars option sends strings to the server that become |
|
4088 | 4088 | environment variables prepended with ``HG_USERVAR_``. For example, |
|
4089 | 4089 | ``--pushvars ENABLE_FEATURE=true``, provides the server side hooks with |
|
4090 | 4090 | ``HG_USERVAR_ENABLE_FEATURE=true`` as part of their environment. |
|
4091 | 4091 | |
|
4092 | 4092 | pushvars can provide for user-overridable hooks as well as set debug |
|
4093 | 4093 | levels. One example is having a hook that blocks commits containing |
|
4094 | 4094 | conflict markers, but enables the user to override the hook if the file |
|
4095 | 4095 | is using conflict markers for testing purposes or the file format has |
|
4096 | 4096 | strings that look like conflict markers. |
|
4097 | 4097 | |
|
4098 | 4098 | By default, servers will ignore `--pushvars`. To enable it add the |
|
4099 | 4099 | following to your configuration file:: |
|
4100 | 4100 | |
|
4101 | 4101 | [push] |
|
4102 | 4102 | pushvars.server = true |
|
4103 | 4103 | |
|
4104 | 4104 | Returns 0 if push was successful, 1 if nothing to push. |
|
4105 | 4105 | """ |
|
4106 | 4106 | |
|
4107 | 4107 | opts = pycompat.byteskwargs(opts) |
|
4108 | 4108 | if opts.get('bookmark'): |
|
4109 | 4109 | ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push') |
|
4110 | 4110 | for b in opts['bookmark']: |
|
4111 | 4111 | # translate -B options to -r so changesets get pushed |
|
4112 | 4112 | b = repo._bookmarks.expandname(b) |
|
4113 | 4113 | if b in repo._bookmarks: |
|
4114 | 4114 | opts.setdefault('rev', []).append(b) |
|
4115 | 4115 | else: |
|
4116 | 4116 | # if we try to push a deleted bookmark, translate it to null |
|
4117 | 4117 | # this lets simultaneous -r, -b options continue working |
|
4118 | 4118 | opts.setdefault('rev', []).append("null") |
|
4119 | 4119 | |
|
4120 | 4120 | path = ui.paths.getpath(dest, default=('default-push', 'default')) |
|
4121 | 4121 | if not path: |
|
4122 | 4122 | raise error.Abort(_('default repository not configured!'), |
|
4123 | 4123 | hint=_("see 'hg help config.paths'")) |
|
4124 | 4124 | dest = path.pushloc or path.loc |
|
4125 | 4125 | branches = (path.branch, opts.get('branch') or []) |
|
4126 | 4126 | ui.status(_('pushing to %s\n') % util.hidepassword(dest)) |
|
4127 | 4127 | revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev')) |
|
4128 | 4128 | other = hg.peer(repo, opts, dest) |
|
4129 | 4129 | |
|
4130 | 4130 | if revs: |
|
4131 | 4131 | revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)] |
|
4132 | 4132 | if not revs: |
|
4133 | 4133 | raise error.Abort(_("specified revisions evaluate to an empty set"), |
|
4134 | 4134 | hint=_("use different revision arguments")) |
|
4135 | 4135 | elif path.pushrev: |
|
4136 | 4136 | # It doesn't make any sense to specify ancestor revisions. So limit |
|
4137 | 4137 | # to DAG heads to make discovery simpler. |
|
4138 | 4138 | expr = revsetlang.formatspec('heads(%r)', path.pushrev) |
|
4139 | 4139 | revs = scmutil.revrange(repo, [expr]) |
|
4140 | 4140 | revs = [repo[rev].node() for rev in revs] |
|
4141 | 4141 | if not revs: |
|
4142 | 4142 | raise error.Abort(_('default push revset for path evaluates to an ' |
|
4143 | 4143 | 'empty set')) |
|
4144 | 4144 | |
|
4145 | 4145 | repo._subtoppath = dest |
|
4146 | 4146 | try: |
|
4147 | 4147 | # push subrepos depth-first for coherent ordering |
|
4148 | 4148 | c = repo[''] |
|
4149 | 4149 | subs = c.substate # only repos that are committed |
|
4150 | 4150 | for s in sorted(subs): |
|
4151 | 4151 | result = c.sub(s).push(opts) |
|
4152 | 4152 | if result == 0: |
|
4153 | 4153 | return not result |
|
4154 | 4154 | finally: |
|
4155 | 4155 | del repo._subtoppath |
|
4156 | 4156 | |
|
4157 | 4157 | opargs = dict(opts.get('opargs', {})) # copy opargs since we may mutate it |
|
4158 | 4158 | opargs.setdefault('pushvars', []).extend(opts.get('pushvars', [])) |
|
4159 | 4159 | |
|
4160 | 4160 | pushop = exchange.push(repo, other, opts.get('force'), revs=revs, |
|
4161 | 4161 | newbranch=opts.get('new_branch'), |
|
4162 | 4162 | bookmarks=opts.get('bookmark', ()), |
|
4163 | 4163 | opargs=opargs) |
|
4164 | 4164 | |
|
4165 | 4165 | result = not pushop.cgresult |
|
4166 | 4166 | |
|
4167 | 4167 | if pushop.bkresult is not None: |
|
4168 | 4168 | if pushop.bkresult == 2: |
|
4169 | 4169 | result = 2 |
|
4170 | 4170 | elif not result and pushop.bkresult: |
|
4171 | 4171 | result = 2 |
|
4172 | 4172 | |
|
4173 | 4173 | return result |
|
4174 | 4174 | |
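The ``--pushvars`` mechanism described in the docstring is a plain name/value mapping: each ``KEY=VALUE`` argument becomes an ``HG_USERVAR_KEY`` environment variable for the server-side hooks. A sketch of that translation, assuming the documented prefix (the conversion itself happens on the server, not in this function)::

    def pushvarstoenv(pushvars):
        env = {}
        for var in pushvars:                 # e.g. 'ENABLE_FEATURE=true'
            key, _, value = var.partition('=')
            env['HG_USERVAR_' + key] = value
        return env
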
|
4175 | 4175 | @command('recover', []) |
|
4176 | 4176 | def recover(ui, repo): |
|
4177 | 4177 | """roll back an interrupted transaction |
|
4178 | 4178 | |
|
4179 | 4179 | Recover from an interrupted commit or pull. |
|
4180 | 4180 | |
|
4181 | 4181 | This command tries to fix the repository status after an |
|
4182 | 4182 | interrupted operation. It should only be necessary when Mercurial |
|
4183 | 4183 | suggests it. |
|
4184 | 4184 | |
|
4185 | 4185 | Returns 0 if successful, 1 if nothing to recover or verify fails. |
|
4186 | 4186 | """ |
|
4187 | 4187 | if repo.recover(): |
|
4188 | 4188 | return hg.verify(repo) |
|
4189 | 4189 | return 1 |
|
4190 | 4190 | |
|
4191 | 4191 | @command('^remove|rm', |
|
4192 | 4192 | [('A', 'after', None, _('record delete for missing files')), |
|
4193 | 4193 | ('f', 'force', None, |
|
4194 | 4194 | _('forget added files, delete modified files')), |
|
4195 | 4195 | ] + subrepoopts + walkopts, |
|
4196 | 4196 | _('[OPTION]... FILE...'), |
|
4197 | 4197 | inferrepo=True) |
|
4198 | 4198 | def remove(ui, repo, *pats, **opts): |
|
4199 | 4199 | """remove the specified files on the next commit |
|
4200 | 4200 | |
|
4201 | 4201 | Schedule the indicated files for removal from the current branch. |
|
4202 | 4202 | |
|
4203 | 4203 | This command schedules the files to be removed at the next commit. |
|
4204 | 4204 | To undo a remove before that, see :hg:`revert`. To undo added |
|
4205 | 4205 | files, see :hg:`forget`. |
|
4206 | 4206 | |
|
4207 | 4207 | .. container:: verbose |
|
4208 | 4208 | |
|
4209 | 4209 | -A/--after can be used to remove only files that have already |
|
4210 | 4210 | been deleted, -f/--force can be used to force deletion, and -Af |
|
4211 | 4211 | can be used to remove files from the next revision without |
|
4212 | 4212 | deleting them from the working directory. |
|
4213 | 4213 | |
|
4214 | 4214 | The following table details the behavior of remove for different |
|
4215 | 4215 | file states (columns) and option combinations (rows). The file |
|
4216 | 4216 | states are Added [A], Clean [C], Modified [M] and Missing [!] |
|
4217 | 4217 | (as reported by :hg:`status`). The actions are Warn, Remove |
|
4218 | 4218 | (from branch) and Delete (from disk): |
|
4219 | 4219 | |
|
4220 | 4220 | ========= == == == == |
|
4221 | 4221 | opt/state A C M ! |
|
4222 | 4222 | ========= == == == == |
|
4223 | 4223 | none W RD W R |
|
4224 | 4224 | -f R RD RD R |
|
4225 | 4225 | -A W W W R |
|
4226 | 4226 | -Af R R R R |
|
4227 | 4227 | ========= == == == == |
|
4228 | 4228 | |
|
4229 | 4229 | .. note:: |
|
4230 | 4230 | |
|
4231 | 4231 | :hg:`remove` never deletes files in Added [A] state from the |
|
4232 | 4232 | working directory, not even if ``--force`` is specified. |
|
4233 | 4233 | |
|
4234 | 4234 | Returns 0 on success, 1 if any warnings encountered. |
|
4235 | 4235 | """ |
|
4236 | 4236 | |
|
4237 | 4237 | opts = pycompat.byteskwargs(opts) |
|
4238 | 4238 | after, force = opts.get('after'), opts.get('force') |
|
4239 | 4239 | if not pats and not after: |
|
4240 | 4240 | raise error.Abort(_('no files specified')) |
|
4241 | 4241 | |
|
4242 | 4242 | m = scmutil.match(repo[None], pats, opts) |
|
4243 | 4243 | subrepos = opts.get('subrepos') |
|
4244 | 4244 | return cmdutil.remove(ui, repo, m, "", after, force, subrepos) |
|
4245 | 4245 | |
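The behavior table in the docstring above can be read as a two-key lookup. An executable restatement, keyed on the option combination and the file state (``W`` = warn, ``R`` = remove from branch, ``D`` = delete from disk)::

    REMOVE_BEHAVIOR = {
        # (after, force): {state: actions}
        (False, False): {'A': 'W',  'C': 'RD', 'M': 'W',  '!': 'R'},
        (False, True):  {'A': 'R',  'C': 'RD', 'M': 'RD', '!': 'R'},
        (True,  False): {'A': 'W',  'C': 'W',  'M': 'W',  '!': 'R'},
        (True,  True):  {'A': 'R',  'C': 'R',  'M': 'R',  '!': 'R'},
    }
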
|
4246 | 4246 | @command('rename|move|mv', |
|
4247 | 4247 | [('A', 'after', None, _('record a rename that has already occurred')), |
|
4248 | 4248 | ('f', 'force', None, _('forcibly copy over an existing managed file')), |
|
4249 | 4249 | ] + walkopts + dryrunopts, |
|
4250 | 4250 | _('[OPTION]... SOURCE... DEST')) |
|
4251 | 4251 | def rename(ui, repo, *pats, **opts): |
|
4252 | 4252 | """rename files; equivalent of copy + remove |
|
4253 | 4253 | |
|
4254 | 4254 | Mark dest as copies of sources; mark sources for deletion. If dest |
|
4255 | 4255 | is a directory, copies are put in that directory. If dest is a |
|
4256 | 4256 | file, there can only be one source. |
|
4257 | 4257 | |
|
4258 | 4258 | By default, this command copies the contents of files as they |
|
4259 | 4259 | exist in the working directory. If invoked with -A/--after, the |
|
4260 | 4260 | operation is recorded, but no copying is performed. |
|
4261 | 4261 | |
|
4262 | 4262 | This command takes effect at the next commit. To undo a rename |
|
4263 | 4263 | before that, see :hg:`revert`. |
|
4264 | 4264 | |
|
4265 | 4265 | Returns 0 on success, 1 if errors are encountered. |
|
4266 | 4266 | """ |
|
4267 | 4267 | opts = pycompat.byteskwargs(opts) |
|
4268 | 4268 | with repo.wlock(False): |
|
4269 | 4269 | return cmdutil.copy(ui, repo, pats, opts, rename=True) |
|
4270 | 4270 | |
|
4271 | 4271 | @command('resolve', |
|
4272 | 4272 | [('a', 'all', None, _('select all unresolved files')), |
|
4273 | 4273 | ('l', 'list', None, _('list state of files needing merge')), |
|
4274 | 4274 | ('m', 'mark', None, _('mark files as resolved')), |
|
4275 | 4275 | ('u', 'unmark', None, _('mark files as unresolved')), |
|
4276 | 4276 | ('n', 'no-status', None, _('hide status prefix'))] |
|
4277 | 4277 | + mergetoolopts + walkopts + formatteropts, |
|
4278 | 4278 | _('[OPTION]... [FILE]...'), |
|
4279 | 4279 | inferrepo=True) |
|
4280 | 4280 | def resolve(ui, repo, *pats, **opts): |
|
4281 | 4281 | """redo merges or set/view the merge status of files |
|
4282 | 4282 | |
|
4283 | 4283 | Merges with unresolved conflicts are often the result of |
|
4284 | 4284 | non-interactive merging using the ``internal:merge`` configuration |
|
4285 | 4285 | setting, or a command-line merge tool like ``diff3``. The resolve |
|
4286 | 4286 | command is used to manage the files involved in a merge, after |
|
4287 | 4287 | :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the |
|
4288 | 4288 | working directory must have two parents). See :hg:`help |
|
4289 | 4289 | merge-tools` for information on configuring merge tools. |
|
4290 | 4290 | |
|
4291 | 4291 | The resolve command can be used in the following ways: |
|
4292 | 4292 | |
|
4293 | 4293 | - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified |
|
4294 | 4294 | files, discarding any previous merge attempts. Re-merging is not |
|
4295 | 4295 | performed for files already marked as resolved. Use ``--all/-a`` |
|
4296 | 4296 | to select all unresolved files. ``--tool`` can be used to specify |
|
4297 | 4297 | the merge tool used for the given files. It overrides the HGMERGE |
|
4298 | 4298 | environment variable and your configuration files. Previous file |
|
4299 | 4299 | contents are saved with a ``.orig`` suffix. |
|
4300 | 4300 | |
|
4301 | 4301 | - :hg:`resolve -m [FILE]`: mark a file as having been resolved |
|
4302 | 4302 | (e.g. after having manually fixed-up the files). The default is |
|
4303 | 4303 | to mark all unresolved files. |
|
4304 | 4304 | |
|
4305 | 4305 | - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The |
|
4306 | 4306 | default is to mark all resolved files. |
|
4307 | 4307 | |
|
4308 | 4308 | - :hg:`resolve -l`: list files which had or still have conflicts. |
|
4309 | 4309 | In the printed list, ``U`` = unresolved and ``R`` = resolved. |
|
4310 | 4310 | You can use ``set:unresolved()`` or ``set:resolved()`` to filter |
|
4311 | 4311 | the list. See :hg:`help filesets` for details. |
|
4312 | 4312 | |
|
4313 | 4313 | .. note:: |
|
4314 | 4314 | |
|
4315 | 4315 | Mercurial will not let you commit files with unresolved merge |
|
4316 | 4316 | conflicts. You must use :hg:`resolve -m ...` before you can |
|
4317 | 4317 | commit after a conflicting merge. |
|
4318 | 4318 | |
|
4319 | 4319 | Returns 0 on success, 1 if any files fail a resolve attempt. |
|
4320 | 4320 | """ |
|
4321 | 4321 | |
|
4322 | 4322 | opts = pycompat.byteskwargs(opts) |
|
4323 | 4323 | flaglist = 'all mark unmark list no_status'.split() |
|
4324 | 4324 | all, mark, unmark, show, nostatus = \ |
|
4325 | 4325 | [opts.get(o) for o in flaglist] |
|
4326 | 4326 | |
|
4327 | 4327 | if (show and (mark or unmark)) or (mark and unmark): |
|
4328 | 4328 | raise error.Abort(_("too many options specified")) |
|
4329 | 4329 | if pats and all: |
|
4330 | 4330 | raise error.Abort(_("can't specify --all and patterns")) |
|
4331 | 4331 | if not (all or pats or show or mark or unmark): |
|
4332 | 4332 | raise error.Abort(_('no files or directories specified'), |
|
4333 | 4333 | hint=('use --all to re-merge all unresolved files')) |
|
4334 | 4334 | |
|
4335 | 4335 | if show: |
|
4336 | 4336 | ui.pager('resolve') |
|
4337 | 4337 | fm = ui.formatter('resolve', opts) |
|
4338 | 4338 | ms = mergemod.mergestate.read(repo) |
|
4339 | 4339 | m = scmutil.match(repo[None], pats, opts) |
|
4340 | 4340 | |
|
4341 | 4341 | # Labels and keys based on merge state. Unresolved path conflicts show |
|
4342 | 4342 | # as 'P'. Resolved path conflicts show as 'R', the same as normal |
|
4343 | 4343 | # resolved conflicts. |
|
4344 | 4344 | mergestateinfo = { |
|
4345 | 4345 | 'u': ('resolve.unresolved', 'U'), |
|
4346 | 4346 | 'r': ('resolve.resolved', 'R'), |
|
4347 | 4347 | 'pu': ('resolve.unresolved', 'P'), |
|
4348 | 4348 | 'pr': ('resolve.resolved', 'R'), |
|
4349 | 4349 | 'd': ('resolve.driverresolved', 'D'), |
|
4350 | 4350 | } |
|
4351 | 4351 | |
|
4352 | 4352 | for f in ms: |
|
4353 | 4353 | if not m(f): |
|
4354 | 4354 | continue |
|
4355 | 4355 | |
|
4356 | 4356 | label, key = mergestateinfo[ms[f]] |
|
4357 | 4357 | fm.startitem() |
|
4358 | 4358 | fm.condwrite(not nostatus, 'status', '%s ', key, label=label) |
|
4359 | 4359 | fm.write('path', '%s\n', f, label=label) |
|
4360 | 4360 | fm.end() |
|
4361 | 4361 | return 0 |
|
4362 | 4362 | |
|
4363 | 4363 | with repo.wlock(): |
|
4364 | 4364 | ms = mergemod.mergestate.read(repo) |
|
4365 | 4365 | |
|
4366 | 4366 | if not (ms.active() or repo.dirstate.p2() != nullid): |
|
4367 | 4367 | raise error.Abort( |
|
4368 | 4368 | _('resolve command not applicable when not merging')) |
|
4369 | 4369 | |
|
4370 | 4370 | wctx = repo[None] |
|
4371 | 4371 | |
|
4372 | 4372 | if ms.mergedriver and ms.mdstate() == 'u': |
|
4373 | 4373 | proceed = mergemod.driverpreprocess(repo, ms, wctx) |
|
4374 | 4374 | ms.commit() |
|
4375 | 4375 | # allow mark and unmark to go through |
|
4376 | 4376 | if not mark and not unmark and not proceed: |
|
4377 | 4377 | return 1 |
|
4378 | 4378 | |
|
4379 | 4379 | m = scmutil.match(wctx, pats, opts) |
|
4380 | 4380 | ret = 0 |
|
4381 | 4381 | didwork = False |
|
4382 | 4382 | runconclude = False |
|
4383 | 4383 | |
|
4384 | 4384 | tocomplete = [] |
|
4385 | 4385 | for f in ms: |
|
4386 | 4386 | if not m(f): |
|
4387 | 4387 | continue |
|
4388 | 4388 | |
|
4389 | 4389 | didwork = True |
|
4390 | 4390 | |
|
4391 | 4391 | # don't let driver-resolved files be marked, and run the conclude |
|
4392 | 4392 | # step if asked to resolve |
|
4393 | 4393 | if ms[f] == "d": |
|
4394 | 4394 | exact = m.exact(f) |
|
4395 | 4395 | if mark: |
|
4396 | 4396 | if exact: |
|
4397 | 4397 | ui.warn(_('not marking %s as it is driver-resolved\n') |
|
4398 | 4398 | % f) |
|
4399 | 4399 | elif unmark: |
|
4400 | 4400 | if exact: |
|
4401 | 4401 | ui.warn(_('not unmarking %s as it is driver-resolved\n') |
|
4402 | 4402 | % f) |
|
4403 | 4403 | else: |
|
4404 | 4404 | runconclude = True |
|
4405 | 4405 | continue |
|
4406 | 4406 | |
|
4407 | 4407 | # path conflicts must be resolved manually |
|
4408 | 4408 | if ms[f] in ("pu", "pr"): |
|
4409 | 4409 | if mark: |
|
4410 | 4410 | ms.mark(f, "pr") |
|
4411 | 4411 | elif unmark: |
|
4412 | 4412 | ms.mark(f, "pu") |
|
4413 | 4413 | elif ms[f] == "pu": |
|
4414 | 4414 | ui.warn(_('%s: path conflict must be resolved manually\n') |
|
4415 | 4415 | % f) |
|
4416 | 4416 | continue |
|
4417 | 4417 | |
|
4418 | 4418 | if mark: |
|
4419 | 4419 | ms.mark(f, "r") |
|
4420 | 4420 | elif unmark: |
|
4421 | 4421 | ms.mark(f, "u") |
|
4422 | 4422 | else: |
|
4423 | 4423 | # backup pre-resolve (merge uses .orig for its own purposes) |
|
4424 | 4424 | a = repo.wjoin(f) |
|
4425 | 4425 | try: |
|
4426 | 4426 | util.copyfile(a, a + ".resolve") |
|
4427 | 4427 | except (IOError, OSError) as inst: |
|
4428 | 4428 | if inst.errno != errno.ENOENT: |
|
4429 | 4429 | raise |
|
4430 | 4430 | |
|
4431 | 4431 | try: |
|
4432 | 4432 | # preresolve file |
|
4433 | 4433 | ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), |
|
4434 | 4434 | 'resolve') |
|
4435 | 4435 | complete, r = ms.preresolve(f, wctx) |
|
4436 | 4436 | if not complete: |
|
4437 | 4437 | tocomplete.append(f) |
|
4438 | 4438 | elif r: |
|
4439 | 4439 | ret = 1 |
|
4440 | 4440 | finally: |
|
4441 | 4441 | ui.setconfig('ui', 'forcemerge', '', 'resolve') |
|
4442 | 4442 | ms.commit() |
|
4443 | 4443 | |
|
4444 | 4444 | # replace filemerge's .orig file with our resolve file, but only |
|
4445 | 4445 | # for merges that are complete |
|
4446 | 4446 | if complete: |
|
4447 | 4447 | try: |
|
4448 | 4448 | util.rename(a + ".resolve", |
|
4449 | 4449 | scmutil.origpath(ui, repo, a)) |
|
4450 | 4450 | except OSError as inst: |
|
4451 | 4451 | if inst.errno != errno.ENOENT: |
|
4452 | 4452 | raise |
|
4453 | 4453 | |
|
4454 | 4454 | for f in tocomplete: |
|
4455 | 4455 | try: |
|
4456 | 4456 | # resolve file |
|
4457 | 4457 | ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), |
|
4458 | 4458 | 'resolve') |
|
4459 | 4459 | r = ms.resolve(f, wctx) |
|
4460 | 4460 | if r: |
|
4461 | 4461 | ret = 1 |
|
4462 | 4462 | finally: |
|
4463 | 4463 | ui.setconfig('ui', 'forcemerge', '', 'resolve') |
|
4464 | 4464 | ms.commit() |
|
4465 | 4465 | |
|
4466 | 4466 | # replace filemerge's .orig file with our resolve file |
|
4467 | 4467 | a = repo.wjoin(f) |
|
4468 | 4468 | try: |
|
4469 | 4469 | util.rename(a + ".resolve", scmutil.origpath(ui, repo, a)) |
|
4470 | 4470 | except OSError as inst: |
|
4471 | 4471 | if inst.errno != errno.ENOENT: |
|
4472 | 4472 | raise |
|
4473 | 4473 | |
|
4474 | 4474 | ms.commit() |
|
4475 | 4475 | ms.recordactions() |
|
4476 | 4476 | |
|
4477 | 4477 | if not didwork and pats: |
|
4478 | 4478 | hint = None |
|
4479 | 4479 | if not any([p for p in pats if p.find(':') >= 0]): |
|
4480 | 4480 | pats = ['path:%s' % p for p in pats] |
|
4481 | 4481 | m = scmutil.match(wctx, pats, opts) |
|
4482 | 4482 | for f in ms: |
|
4483 | 4483 | if not m(f): |
|
4484 | 4484 | continue |
|
4485 | 4485 | flags = ''.join(['-%s ' % o[0] for o in flaglist |
|
4486 | 4486 | if opts.get(o)]) |
|
4487 | 4487 | hint = _("(try: hg resolve %s%s)\n") % ( |
|
4488 | 4488 | flags, |
|
4489 | 4489 | ' '.join(pats)) |
|
4490 | 4490 | break |
|
4491 | 4491 | ui.warn(_("arguments do not match paths that need resolving\n")) |
|
4492 | 4492 | if hint: |
|
4493 | 4493 | ui.warn(hint) |
|
4494 | 4494 | elif ms.mergedriver and ms.mdstate() != 's': |
|
4495 | 4495 | # run conclude step when either a driver-resolved file is requested |
|
4496 | 4496 | # or there are no driver-resolved files |
|
4497 | 4497 | # we can't use 'ret' to determine whether any files are unresolved |
|
4498 | 4498 | # because we might not have tried to resolve some |
|
4499 | 4499 | if ((runconclude or not list(ms.driverresolved())) |
|
4500 | 4500 | and not list(ms.unresolved())): |
|
4501 | 4501 | proceed = mergemod.driverconclude(repo, ms, wctx) |
|
4502 | 4502 | ms.commit() |
|
4503 | 4503 | if not proceed: |
|
4504 | 4504 | return 1 |
|
4505 | 4505 | |
|
4506 | 4506 | # Nudge users into finishing an unfinished operation |
|
4507 | 4507 | unresolvedf = list(ms.unresolved()) |
|
4508 | 4508 | driverresolvedf = list(ms.driverresolved()) |
|
4509 | 4509 | if not unresolvedf and not driverresolvedf: |
|
4510 | 4510 | ui.status(_('(no more unresolved files)\n')) |
|
4511 | 4511 | cmdutil.checkafterresolved(repo) |
|
4512 | 4512 | elif not unresolvedf: |
|
4513 | 4513 | ui.status(_('(no more unresolved files -- ' |
|
4514 | 4514 | 'run "hg resolve --all" to conclude)\n')) |
|
4515 | 4515 | |
|
4516 | 4516 | return ret |
|
4517 | 4517 | |
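One subtlety in the function above: before re-merging, resolve copies each file to ``FILE.resolve``, and once the merge of that file completes it renames that copy over the ``.orig`` backup that filemerge wrote, so the user's pre-resolve contents are what survives as ``FILE.orig``. Expressed as a sketch (the ENOENT tolerance from the code above is omitted)::

    import os
    import shutil

    def preservepreresolve(path, origpath):
        # before re-merging: keep a copy of the conflicted file
        shutil.copyfile(path, path + '.resolve')
        # ... re-merge `path` here ...
        # afterwards: make that copy the canonical .orig backup
        os.rename(path + '.resolve', origpath)
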
|
4518 | 4518 | @command('revert', |
|
4519 | 4519 | [('a', 'all', None, _('revert all changes when no arguments given')), |
|
4520 | 4520 | ('d', 'date', '', _('tipmost revision matching date'), _('DATE')), |
|
4521 | 4521 | ('r', 'rev', '', _('revert to the specified revision'), _('REV')), |
|
4522 | 4522 | ('C', 'no-backup', None, _('do not save backup copies of files')), |
|
4523 | 4523 | ('i', 'interactive', None, _('interactively select the changes')), |
|
4524 | 4524 | ] + walkopts + dryrunopts, |
|
4525 | 4525 | _('[OPTION]... [-r REV] [NAME]...')) |
|
4526 | 4526 | def revert(ui, repo, *pats, **opts): |
|
4527 | 4527 | """restore files to their checkout state |
|
4528 | 4528 | |
|
4529 | 4529 | .. note:: |
|
4530 | 4530 | |
|
4531 | 4531 | To check out earlier revisions, you should use :hg:`update REV`. |
|
4532 | 4532 | To cancel an uncommitted merge (and lose your changes), |
|
4533 | 4533 | use :hg:`merge --abort`. |
|
4534 | 4534 | |
|
4535 | 4535 | With no revision specified, revert the specified files or directories |
|
4536 | 4536 | to the contents they had in the parent of the working directory. |
|
4537 | 4537 | This restores the contents of files to an unmodified |
|
4538 | 4538 | state and unschedules adds, removes, copies, and renames. If the |
|
4539 | 4539 | working directory has two parents, you must explicitly specify a |
|
4540 | 4540 | revision. |
|
4541 | 4541 | |
|
4542 | 4542 | Using the -r/--rev or -d/--date options, revert the given files or |
|
4543 | 4543 | directories to their states as of a specific revision. Because |
|
4544 | 4544 | revert does not change the working directory parents, this will |
|
4545 | 4545 | cause these files to appear modified. This can be helpful to "back |
|
4546 | 4546 | out" some or all of an earlier change. See :hg:`backout` for a |
|
4547 | 4547 | related method. |
|
4548 | 4548 | |
|
4549 | 4549 | Modified files are saved with a .orig suffix before reverting. |
|
4550 | 4550 | To disable these backups, use --no-backup. It is possible to store |
|
4551 | 4551 | the backup files in a custom directory relative to the root of the |
|
4552 | 4552 | repository by setting the ``ui.origbackuppath`` configuration |
|
4553 | 4553 | option. |
|
4554 | 4554 | |
|
4555 | 4555 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
4556 | 4556 | |
|
4557 | 4557 | See :hg:`help backout` for a way to reverse the effect of an |
|
4558 | 4558 | earlier changeset. |
|
4559 | 4559 | |
|
4560 | 4560 | Returns 0 on success. |
|
4561 | 4561 | """ |
|
4562 | 4562 | |
|
4563 | 4563 | opts = pycompat.byteskwargs(opts) |
|
4564 | 4564 | if opts.get("date"): |
|
4565 | 4565 | if opts.get("rev"): |
|
4566 | 4566 | raise error.Abort(_("you can't specify a revision and a date")) |
|
4567 | 4567 | opts["rev"] = cmdutil.finddate(ui, repo, opts["date"]) |
|
4568 | 4568 | |
|
4569 | 4569 | parent, p2 = repo.dirstate.parents() |
|
4570 | 4570 | if not opts.get('rev') and p2 != nullid: |
|
4571 | 4571 | # revert after merge is a trap for new users (issue2915) |
|
4572 | 4572 | raise error.Abort(_('uncommitted merge with no revision specified'), |
|
4573 | 4573 | hint=_("use 'hg update' or see 'hg help revert'")) |
|
4574 | 4574 | |
|
4575 | 4575 | rev = opts.get('rev') |
|
4576 | 4576 | if rev: |
|
4577 | 4577 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
4578 | 4578 | ctx = scmutil.revsingle(repo, rev) |
|
4579 | 4579 | |
|
4580 | 4580 | if (not (pats or opts.get('include') or opts.get('exclude') or |
|
4581 | 4581 | opts.get('all') or opts.get('interactive'))): |
|
4582 | 4582 | msg = _("no files or directories specified") |
|
4583 | 4583 | if p2 != nullid: |
|
4584 | 4584 | hint = _("uncommitted merge, use --all to discard all changes," |
|
4585 | 4585 | " or 'hg update -C .' to abort the merge") |
|
4586 | 4586 | raise error.Abort(msg, hint=hint) |
|
4587 | 4587 | dirty = any(repo.status()) |
|
4588 | 4588 | node = ctx.node() |
|
4589 | 4589 | if node != parent: |
|
4590 | 4590 | if dirty: |
|
4591 | 4591 | hint = _("uncommitted changes, use --all to discard all" |
|
4592 | 4592 | " changes, or 'hg update %s' to update") % ctx.rev() |
|
4593 | 4593 | else: |
|
4594 | 4594 | hint = _("use --all to revert all files," |
|
4595 | 4595 | " or 'hg update %s' to update") % ctx.rev() |
|
4596 | 4596 | elif dirty: |
|
4597 | 4597 | hint = _("uncommitted changes, use --all to discard all changes") |
|
4598 | 4598 | else: |
|
4599 | 4599 | hint = _("use --all to revert all files") |
|
4600 | 4600 | raise error.Abort(msg, hint=hint) |
|
4601 | 4601 | |
|
4602 | 4602 | return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, |
|
4603 | 4603 | **pycompat.strkwargs(opts)) |
|
4604 | 4604 | |
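The ``ui.origbackuppath`` option mentioned in the docstring redirects the ``.orig`` backups into a directory relative to the repository root. A configuration sketch (the directory name is illustrative)::

    [ui]
    origbackuppath = .hg/origbackups
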
|
4605 | 4605 | @command('rollback', dryrunopts + |
|
4606 | 4606 | [('f', 'force', False, _('ignore safety measures'))]) |
|
4607 | 4607 | def rollback(ui, repo, **opts): |
|
4608 | 4608 | """roll back the last transaction (DANGEROUS) (DEPRECATED) |
|
4609 | 4609 | |
|
4610 | 4610 | Please use :hg:`commit --amend` instead of rollback to correct |
|
4611 | 4611 | mistakes in the last commit. |
|
4612 | 4612 | |
|
4613 | 4613 | This command should be used with care. There is only one level of |
|
4614 | 4614 | rollback, and there is no way to undo a rollback. It will also |
|
4615 | 4615 | restore the dirstate at the time of the last transaction, losing |
|
4616 | 4616 | any dirstate changes since that time. This command does not alter |
|
4617 | 4617 | the working directory. |
|
4618 | 4618 | |
|
4619 | 4619 | Transactions are used to encapsulate the effects of all commands |
|
4620 | 4620 | that create new changesets or propagate existing changesets into a |
|
4621 | 4621 | repository. |
|
4622 | 4622 | |
|
4623 | 4623 | .. container:: verbose |
|
4624 | 4624 | |
|
4625 | 4625 | For example, the following commands are transactional, and their |
|
4626 | 4626 | effects can be rolled back: |
|
4627 | 4627 | |
|
4628 | 4628 | - commit |
|
4629 | 4629 | - import |
|
4630 | 4630 | - pull |
|
4631 | 4631 | - push (with this repository as the destination) |
|
4632 | 4632 | - unbundle |
|
4633 | 4633 | |
|
4634 | 4634 | To avoid permanent data loss, rollback will refuse to rollback a |
|
4635 | 4635 | commit transaction if it isn't checked out. Use --force to |
|
4636 | 4636 | override this protection. |
|
4637 | 4637 | |
|
4638 | 4638 | The rollback command can be entirely disabled by setting the |
|
4639 | 4639 | ``ui.rollback`` configuration setting to false. If you're here |
|
4640 | 4640 | because you want to use rollback and it's disabled, you can |
|
4641 | 4641 | re-enable the command by setting ``ui.rollback`` to true. |
|
4642 | 4642 | |
|
4643 | 4643 | This command is not intended for use on public repositories. Once |
|
4644 | 4644 | changes are visible for pull by other users, rolling a transaction |
|
4645 | 4645 | back locally is ineffective (someone else may already have pulled |
|
4646 | 4646 | the changes). Furthermore, a race is possible with readers of the |
|
4647 | 4647 | repository; for example an in-progress pull from the repository |
|
4648 | 4648 | may fail if a rollback is performed. |
|
4649 | 4649 | |
|
4650 | 4650 | Returns 0 on success, 1 if no rollback data is available. |
|
4651 | 4651 | """ |
|
4652 | 4652 | if not ui.configbool('ui', 'rollback'): |
|
4653 | 4653 | raise error.Abort(_('rollback is disabled because it is unsafe'), |
|
4654 | 4654 | hint=('see `hg help -v rollback` for information')) |
|
4655 | 4655 | return repo.rollback(dryrun=opts.get(r'dry_run'), |
|
4656 | 4656 | force=opts.get(r'force')) |
|
4657 | 4657 | |
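The ``ui.rollback`` gate checked at the top of the function maps to a one-line configuration. A sketch of disabling the command, as the docstring describes::

    [ui]
    rollback = false
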
|
4658 | 4658 | @command('root', [], cmdtype=readonly) |
|
4659 | 4659 | def root(ui, repo): |
|
4660 | 4660 | """print the root (top) of the current working directory |
|
4661 | 4661 | |
|
4662 | 4662 | Print the root directory of the current repository. |
|
4663 | 4663 | |
|
4664 | 4664 | Returns 0 on success. |
|
4665 | 4665 | """ |
|
4666 | 4666 | ui.write(repo.root + "\n") |
|
4667 | 4667 | |
|
4668 | 4668 | @command('^serve', |
|
4669 | 4669 | [('A', 'accesslog', '', _('name of access log file to write to'), |
|
4670 | 4670 | _('FILE')), |
|
4671 | 4671 | ('d', 'daemon', None, _('run server in background')), |
|
4672 | 4672 | ('', 'daemon-postexec', [], _('used internally by daemon mode')), |
|
4673 | 4673 | ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')), |
|
4674 | 4674 | # use string type, then we can check if something was passed |
|
4675 | 4675 | ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')), |
|
4676 | 4676 | ('a', 'address', '', _('address to listen on (default: all interfaces)'), |
|
4677 | 4677 | _('ADDR')), |
|
4678 | 4678 | ('', 'prefix', '', _('prefix path to serve from (default: server root)'), |
|
4679 | 4679 | _('PREFIX')), |
|
4680 | 4680 | ('n', 'name', '', |
|
4681 | 4681 | _('name to show in web pages (default: working directory)'), _('NAME')), |
|
4682 | 4682 | ('', 'web-conf', '', |
|
4683 | 4683 | _("name of the hgweb config file (see 'hg help hgweb')"), _('FILE')), |
|
4684 | 4684 | ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'), |
|
4685 | 4685 | _('FILE')), |
|
4686 | 4686 | ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')), |
|
4687 | 4687 | ('', 'stdio', None, _('for remote clients (ADVANCED)')), |
|
4688 | 4688 | ('', 'cmdserver', '', _('for remote clients (ADVANCED)'), _('MODE')), |
|
4689 | 4689 | ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')), |
|
4690 | 4690 | ('', 'style', '', _('template style to use'), _('STYLE')), |
|
4691 | 4691 | ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')), |
|
4692 | 4692 | ('', 'certificate', '', _('SSL certificate file'), _('FILE'))] |
|
4693 | 4693 | + subrepoopts, |
|
4694 | 4694 | _('[OPTION]...'), |
|
4695 | 4695 | optionalrepo=True) |
|
4696 | 4696 | def serve(ui, repo, **opts): |
|
4697 | 4697 | """start stand-alone webserver |
|
4698 | 4698 | |
|
4699 | 4699 | Start a local HTTP repository browser and pull server. You can use |
|
4700 | 4700 | this for ad-hoc sharing and browsing of repositories. It is |
|
4701 | 4701 | recommended to use a real web server to serve a repository for |
|
4702 | 4702 | longer periods of time. |
|
4703 | 4703 | |
|
4704 | 4704 | Please note that the server does not implement access control. |
|
4705 | 4705 | This means that, by default, anybody can read from the server and |
|
4706 | 4706 | nobody can write to it. Set the ``web.allow-push``
|
4707 | 4707 | option to ``*`` to allow everybody to push to the server. You |
|
4708 | 4708 | should use a real web server if you need to authenticate users. |
|
4709 | 4709 | |
|
4710 | 4710 | By default, the server logs accesses to stdout and errors to |
|
4711 | 4711 | stderr. Use the -A/--accesslog and -E/--errorlog options to log to |
|
4712 | 4712 | files. |
|
4713 | 4713 | |
|
4714 | 4714 | To have the server choose a free port number to listen on, specify |
|
4715 | 4715 | a port number of 0; in this case, the server will print the port |
|
4716 | 4716 | number it uses. |
|
4717 | 4717 | |
|
4718 | 4718 | Returns 0 on success. |
|
4719 | 4719 | """ |
|
4720 | 4720 | |
|
4721 | 4721 | opts = pycompat.byteskwargs(opts) |
|
4722 | 4722 | if opts["stdio"] and opts["cmdserver"]: |
|
4723 | 4723 | raise error.Abort(_("cannot use --stdio with --cmdserver")) |
|
4724 | 4724 | |
|
4725 | 4725 | if opts["stdio"]: |
|
4726 | 4726 | if repo is None: |
|
4727 | 4727 | raise error.RepoError(_("there is no Mercurial repository here" |
|
4728 | 4728 | " (.hg not found)")) |
|
4729 | 4729 | s = wireprotoserver.sshserver(ui, repo) |
|
4730 | 4730 | s.serve_forever() |
|
4731 | 4731 | |
|
4732 | 4732 | service = server.createservice(ui, repo, opts) |
|
4733 | 4733 | return server.runservice(opts, initfn=service.init, runfn=service.run) |
|
4734 | 4734 | |
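Following the docstring's warning about access control, enabling anonymous pushes over ``hg serve`` is a single option (use with care; the docstring recommends a real web server when authentication matters)::

    [web]
    allow-push = *
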
|
4735 | 4735 | @command('^status|st', |
|
4736 | 4736 | [('A', 'all', None, _('show status of all files')), |
|
4737 | 4737 | ('m', 'modified', None, _('show only modified files')), |
|
4738 | 4738 | ('a', 'added', None, _('show only added files')), |
|
4739 | 4739 | ('r', 'removed', None, _('show only removed files')), |
|
4740 | 4740 | ('d', 'deleted', None, _('show only deleted (but tracked) files')), |
|
4741 | 4741 | ('c', 'clean', None, _('show only files without changes')), |
|
4742 | 4742 | ('u', 'unknown', None, _('show only unknown (not tracked) files')), |
|
4743 | 4743 | ('i', 'ignored', None, _('show only ignored files')), |
|
4744 | 4744 | ('n', 'no-status', None, _('hide status prefix')), |
|
4745 | 4745 | ('t', 'terse', '', _('show the terse output (EXPERIMENTAL)')), |
|
4746 | 4746 | ('C', 'copies', None, _('show source of copied files')), |
|
4747 | 4747 | ('0', 'print0', None, _('end filenames with NUL, for use with xargs')), |
|
4748 | 4748 | ('', 'rev', [], _('show difference from revision'), _('REV')), |
|
4749 | 4749 | ('', 'change', '', _('list the changed files of a revision'), _('REV')), |
|
4750 | 4750 | ] + walkopts + subrepoopts + formatteropts, |
|
4751 | 4751 | _('[OPTION]... [FILE]...'), |
|
4752 | 4752 | inferrepo=True, cmdtype=readonly) |
|
4753 | 4753 | def status(ui, repo, *pats, **opts): |
|
4754 | 4754 | """show changed files in the working directory |
|
4755 | 4755 | |
|
4756 | 4756 | Show status of files in the repository. If names are given, only |
|
4757 | 4757 | files that match are shown. Files that are clean or ignored or |
|
4758 | 4758 | the source of a copy/move operation are not listed unless
|
4759 | 4759 | -c/--clean, -i/--ignored, -C/--copies or -A/--all are given. |
|
4760 | 4760 | Unless options described with "show only ..." are given, the |
|
4761 | 4761 | options -mardu are used. |
|
4762 | 4762 | |
|
4763 | 4763 | Option -q/--quiet hides untracked (unknown and ignored) files |
|
4764 | 4764 | unless explicitly requested with -u/--unknown or -i/--ignored. |
|
4765 | 4765 | |
|
4766 | 4766 | .. note:: |
|
4767 | 4767 | |
|
4768 | 4768 | :hg:`status` may appear to disagree with diff if permissions have |
|
4769 | 4769 | changed or a merge has occurred. The standard diff format does |
|
4770 | 4770 | not report permission changes and diff only reports changes |
|
4771 | 4771 | relative to one merge parent. |
|
4772 | 4772 | |
|
4773 | 4773 | If one revision is given, it is used as the base revision. |
|
4774 | 4774 | If two revisions are given, the differences between them are |
|
4775 | 4775 | shown. The --change option can also be used as a shortcut to list |
|
4776 | 4776 | the changed files of a revision from its first parent. |
|
4777 | 4777 | |
|
4778 | 4778 | The codes used to show the status of files are:: |
|
4779 | 4779 | |
|
4780 | 4780 | M = modified |
|
4781 | 4781 | A = added |
|
4782 | 4782 | R = removed |
|
4783 | 4783 | C = clean |
|
4784 | 4784 | ! = missing (deleted by non-hg command, but still tracked) |
|
4785 | 4785 | ? = not tracked |
|
4786 | 4786 | I = ignored |
|
4787 | 4787 | = origin of the previous file (with --copies) |
|
4788 | 4788 | |
|
4789 | 4789 | .. container:: verbose |
|
4790 | 4790 | |
|
4791 | 4791 | The -t/--terse option abbreviates the output by showing only the directory |
|
4792 | 4792 | name if all the files in it share the same status. The option takes an |
|
4793 | 4793 | argument indicating the statuses to abbreviate: 'm' for 'modified', 'a' |
|
4794 | 4794 | for 'added', 'r' for 'removed', 'd' for 'deleted', 'u' for 'unknown', 'i' |
|
4795 | 4795 | for 'ignored', and 'c' for 'clean'.
|
4796 | 4796 | |
|
4797 | 4797 | It abbreviates only those statuses which are passed. Note that clean and |
|
4798 | 4798 | ignored files are not displayed with '--terse ic' unless the -c/--clean |
|
4799 | 4799 | and -i/--ignored options are also used. |
|
4800 | 4800 | |
|
4801 | 4801 | The -v/--verbose option shows information when the repository is in an |
|
4802 | 4802 | unfinished state (merge, shelve, rebase, etc.). You can have this behavior
|
4803 | 4803 | turned on by default by enabling the ``commands.status.verbose`` option. |
|
4804 | 4804 | |
|
4805 | 4805 | You can skip displaying some of these states by setting |
|
4806 | 4806 | ``commands.status.skipstates`` to one or more of: 'bisect', 'graft', |
|
4807 | 4807 | 'histedit', 'merge', 'rebase', or 'unshelve'. |
|
4808 | 4808 | |
|
4809 | 4809 | Examples: |
|
4810 | 4810 | |
|
4811 | 4811 | - show changes in the working directory relative to a |
|
4812 | 4812 | changeset:: |
|
4813 | 4813 | |
|
4814 | 4814 | hg status --rev 9353 |
|
4815 | 4815 | |
|
4816 | 4816 | - show changes in the working directory relative to the |
|
4817 | 4817 | current directory (see :hg:`help patterns` for more information):: |
|
4818 | 4818 | |
|
4819 | 4819 | hg status re: |
|
4820 | 4820 | |
|
4821 | 4821 | - show all changes including copies in an existing changeset:: |
|
4822 | 4822 | |
|
4823 | 4823 | hg status --copies --change 9353 |
|
4824 | 4824 | |
|
4825 | 4825 | - get a NUL separated list of added files, suitable for xargs:: |
|
4826 | 4826 | |
|
4827 | 4827 | hg status -an0 |
|
4828 | 4828 | |
|
4829 | 4829 | - show more information about the repository status, abbreviating |
|
4830 | 4830 | added, removed, modified, deleted, and untracked paths:: |
|
4831 | 4831 | |
|
4832 | 4832 | hg status -v -t mardu |
|
4833 | 4833 | |
|
4834 | 4834 | Returns 0 on success. |
|
4835 | 4835 | |
|
4836 | 4836 | """ |
|
4837 | 4837 | |
|
4838 | 4838 | opts = pycompat.byteskwargs(opts) |
|
4839 | 4839 | revs = opts.get('rev') |
|
4840 | 4840 | change = opts.get('change') |
|
4841 | 4841 | terse = opts.get('terse') |
|
4842 | 4842 | |
|
4843 | 4843 | if revs and change: |
|
4844 | 4844 | msg = _('cannot specify --rev and --change at the same time') |
|
4845 | 4845 | raise error.Abort(msg) |
|
4846 | 4846 | elif revs and terse: |
|
4847 | 4847 | msg = _('cannot use --terse with --rev') |
|
4848 | 4848 | raise error.Abort(msg) |
|
4849 | 4849 | elif change: |
|
4850 | 4850 | repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn') |
|
4851 | 4851 | node2 = scmutil.revsingle(repo, change, None).node() |
|
4852 | 4852 | node1 = repo[node2].p1().node() |
|
4853 | 4853 | else: |
|
4854 | 4854 | repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn') |
|
4855 | 4855 | node1, node2 = scmutil.revpair(repo, revs) |
|
4856 | 4856 | |
|
4857 | 4857 | if pats or ui.configbool('commands', 'status.relative'): |
|
4858 | 4858 | cwd = repo.getcwd() |
|
4859 | 4859 | else: |
|
4860 | 4860 | cwd = '' |
|
4861 | 4861 | |
|
4862 | 4862 | if opts.get('print0'): |
|
4863 | 4863 | end = '\0' |
|
4864 | 4864 | else: |
|
4865 | 4865 | end = '\n' |
|
4866 | 4866 | copy = {} |
|
4867 | 4867 | states = 'modified added removed deleted unknown ignored clean'.split() |
|
4868 | 4868 | show = [k for k in states if opts.get(k)] |
|
4869 | 4869 | if opts.get('all'): |
|
4870 | 4870 | show += ui.quiet and (states[:4] + ['clean']) or states |
|
4871 | 4871 | |
|
4872 | 4872 | if not show: |
|
4873 | 4873 | if ui.quiet: |
|
4874 | 4874 | show = states[:4] |
|
4875 | 4875 | else: |
|
4876 | 4876 | show = states[:5] |
|
4877 | 4877 | |
|
4878 | 4878 | m = scmutil.match(repo[node2], pats, opts) |
|
4879 | 4879 | if terse: |
|
4880 | 4880 | # clean and unknown are needed to compute the terse output
|
4881 | 4881 | stat = repo.status(node1, node2, m, |
|
4882 | 4882 | 'ignored' in show or 'i' in terse, |
|
4883 | 4883 | True, True, opts.get('subrepos')) |
|
4884 | 4884 | |
|
4885 | 4885 | stat = cmdutil.tersedir(stat, terse) |
|
4886 | 4886 | else: |
|
4887 | 4887 | stat = repo.status(node1, node2, m, |
|
4888 | 4888 | 'ignored' in show, 'clean' in show, |
|
4889 | 4889 | 'unknown' in show, opts.get('subrepos')) |
|
4890 | 4890 | |
|
4891 | 4891 | changestates = zip(states, pycompat.iterbytestr('MAR!?IC'), stat) |
|
4892 | 4892 | |
|
4893 | 4893 | if (opts.get('all') or opts.get('copies') |
|
4894 | 4894 | or ui.configbool('ui', 'statuscopies')) and not opts.get('no_status'): |
|
4895 | 4895 | copy = copies.pathcopies(repo[node1], repo[node2], m) |
|
4896 | 4896 | |
|
4897 | 4897 | ui.pager('status') |
|
4898 | 4898 | fm = ui.formatter('status', opts) |
|
4899 | 4899 | fmt = '%s' + end |
|
4900 | 4900 | showchar = not opts.get('no_status') |
|
4901 | 4901 | |
|
4902 | 4902 | for state, char, files in changestates: |
|
4903 | 4903 | if state in show: |
|
4904 | 4904 | label = 'status.' + state |
|
4905 | 4905 | for f in files: |
|
4906 | 4906 | fm.startitem() |
|
4907 | 4907 | fm.condwrite(showchar, 'status', '%s ', char, label=label) |
|
4908 | 4908 | fm.write('path', fmt, repo.pathto(f, cwd), label=label) |
|
4909 | 4909 | if f in copy: |
|
4910 | 4910 | fm.write("copy", ' %s' + end, repo.pathto(copy[f], cwd), |
|
4911 | 4911 | label='status.copied') |
|
4912 | 4912 | |
|
4913 | 4913 | if ((ui.verbose or ui.configbool('commands', 'status.verbose')) |
|
4914 | 4914 | and not ui.plain()): |
|
4915 | 4915 | cmdutil.morestatus(repo, fm) |
|
4916 | 4916 | fm.end() |
|
4917 | 4917 | |
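As an aside on the formatter loop in status() above: the pairing of internal
state names with the one-letter codes is purely positional. A minimal
standalone sketch (plain Python, no repo needed)::

    # Order matches repo.status() results, paired with 'MAR!?IC'.
    states = 'modified added removed deleted unknown ignored clean'.split()
    for state, char in zip(states, 'MAR!?IC'):
        print('%s -> %s' % (char, state))
    # M -> modified, A -> added, R -> removed, ! -> deleted (missing on
    # disk), ? -> unknown, I -> ignored, C -> clean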
|
4918 | 4918 | @command('^summary|sum', |
|
4919 | 4919 | [('', 'remote', None, _('check for push and pull'))], |
|
4920 | 4920 | '[--remote]', cmdtype=readonly) |
|
4921 | 4921 | def summary(ui, repo, **opts): |
|
4922 | 4922 | """summarize working directory state |
|
4923 | 4923 | |
|
4924 | 4924 | This generates a brief summary of the working directory state, |
|
4925 | 4925 | including parents, branch, commit status, phase and available updates. |
|
4926 | 4926 | |
|
4927 | 4927 | With the --remote option, this will check the default paths for |
|
4928 | 4928 | incoming and outgoing changes. This can be time-consuming. |
|
4929 | 4929 | |
|
4930 | 4930 | Returns 0 on success. |
|
4931 | 4931 | """ |
|
4932 | 4932 | |
|
4933 | 4933 | opts = pycompat.byteskwargs(opts) |
|
4934 | 4934 | ui.pager('summary') |
|
4935 | 4935 | ctx = repo[None] |
|
4936 | 4936 | parents = ctx.parents() |
|
4937 | 4937 | pnode = parents[0].node() |
|
4938 | 4938 | marks = [] |
|
4939 | 4939 | |
|
4940 | 4940 | ms = None |
|
4941 | 4941 | try: |
|
4942 | 4942 | ms = mergemod.mergestate.read(repo) |
|
4943 | 4943 | except error.UnsupportedMergeRecords as e: |
|
4944 | 4944 | s = ' '.join(e.recordtypes) |
|
4945 | 4945 | ui.warn( |
|
4946 | 4946 | _('warning: merge state has unsupported record types: %s\n') % s) |
|
4947 | 4947 | unresolved = [] |
|
4948 | 4948 | else: |
|
4949 | 4949 | unresolved = list(ms.unresolved()) |
|
4950 | 4950 | |
|
4951 | 4951 | for p in parents: |
|
4952 | 4952 | # label with log.changeset (instead of log.parent) since this |
|
4953 | 4953 | # shows a working directory parent *changeset*: |
|
4954 | 4954 | # i18n: column positioning for "hg summary" |
|
4955 | 4955 | ui.write(_('parent: %d:%s ') % (p.rev(), p), |
|
4956 | 4956 | label=logcmdutil.changesetlabels(p)) |
|
4957 | 4957 | ui.write(' '.join(p.tags()), label='log.tag') |
|
4958 | 4958 | if p.bookmarks(): |
|
4959 | 4959 | marks.extend(p.bookmarks()) |
|
4960 | 4960 | if p.rev() == -1: |
|
4961 | 4961 | if not len(repo): |
|
4962 | 4962 | ui.write(_(' (empty repository)')) |
|
4963 | 4963 | else: |
|
4964 | 4964 | ui.write(_(' (no revision checked out)')) |
|
4965 | 4965 | if p.obsolete(): |
|
4966 | 4966 | ui.write(_(' (obsolete)')) |
|
4967 | 4967 | if p.isunstable(): |
|
4968 | 4968 | instabilities = (ui.label(instability, 'trouble.%s' % instability) |
|
4969 | 4969 | for instability in p.instabilities()) |
|
4970 | 4970 | ui.write(' (' |
|
4971 | 4971 | + ', '.join(instabilities) |
|
4972 | 4972 | + ')') |
|
4973 | 4973 | ui.write('\n') |
|
4974 | 4974 | if p.description(): |
|
4975 | 4975 | ui.status(' ' + p.description().splitlines()[0].strip() + '\n', |
|
4976 | 4976 | label='log.summary') |
|
4977 | 4977 | |
|
4978 | 4978 | branch = ctx.branch() |
|
4979 | 4979 | bheads = repo.branchheads(branch) |
|
4980 | 4980 | # i18n: column positioning for "hg summary" |
|
4981 | 4981 | m = _('branch: %s\n') % branch |
|
4982 | 4982 | if branch != 'default': |
|
4983 | 4983 | ui.write(m, label='log.branch') |
|
4984 | 4984 | else: |
|
4985 | 4985 | ui.status(m, label='log.branch') |
|
4986 | 4986 | |
|
4987 | 4987 | if marks: |
|
4988 | 4988 | active = repo._activebookmark |
|
4989 | 4989 | # i18n: column positioning for "hg summary" |
|
4990 | 4990 | ui.write(_('bookmarks:'), label='log.bookmark') |
|
4991 | 4991 | if active is not None: |
|
4992 | 4992 | if active in marks: |
|
4993 | 4993 | ui.write(' *' + active, label=bookmarks.activebookmarklabel) |
|
4994 | 4994 | marks.remove(active) |
|
4995 | 4995 | else: |
|
4996 | 4996 | ui.write(' [%s]' % active, label=bookmarks.activebookmarklabel) |
|
4997 | 4997 | for m in marks: |
|
4998 | 4998 | ui.write(' ' + m, label='log.bookmark') |
|
4999 | 4999 | ui.write('\n', label='log.bookmark') |
|
5000 | 5000 | |
|
5001 | 5001 | status = repo.status(unknown=True) |
|
5002 | 5002 | |
|
5003 | 5003 | c = repo.dirstate.copies() |
|
5004 | 5004 | copied, renamed = [], [] |
|
5005 | 5005 | for d, s in c.iteritems(): |
|
5006 | 5006 | if s in status.removed: |
|
5007 | 5007 | status.removed.remove(s) |
|
5008 | 5008 | renamed.append(d) |
|
5009 | 5009 | else: |
|
5010 | 5010 | copied.append(d) |
|
5011 | 5011 | if d in status.added: |
|
5012 | 5012 | status.added.remove(d) |
|
5013 | 5013 | |
|
5014 | 5014 | subs = [s for s in ctx.substate if ctx.sub(s).dirty()] |
|
5015 | 5015 | |
|
5016 | 5016 | labels = [(ui.label(_('%d modified'), 'status.modified'), status.modified), |
|
5017 | 5017 | (ui.label(_('%d added'), 'status.added'), status.added), |
|
5018 | 5018 | (ui.label(_('%d removed'), 'status.removed'), status.removed), |
|
5019 | 5019 | (ui.label(_('%d renamed'), 'status.copied'), renamed), |
|
5020 | 5020 | (ui.label(_('%d copied'), 'status.copied'), copied), |
|
5021 | 5021 | (ui.label(_('%d deleted'), 'status.deleted'), status.deleted), |
|
5022 | 5022 | (ui.label(_('%d unknown'), 'status.unknown'), status.unknown), |
|
5023 | 5023 | (ui.label(_('%d unresolved'), 'resolve.unresolved'), unresolved), |
|
5024 | 5024 | (ui.label(_('%d subrepos'), 'status.modified'), subs)] |
|
5025 | 5025 | t = [] |
|
5026 | 5026 | for l, s in labels: |
|
5027 | 5027 | if s: |
|
5028 | 5028 | t.append(l % len(s)) |
|
5029 | 5029 | |
|
5030 | 5030 | t = ', '.join(t) |
|
5031 | 5031 | cleanworkdir = False |
|
5032 | 5032 | |
|
5033 | 5033 | if repo.vfs.exists('graftstate'): |
|
5034 | 5034 | t += _(' (graft in progress)') |
|
5035 | 5035 | if repo.vfs.exists('updatestate'): |
|
5036 | 5036 | t += _(' (interrupted update)') |
|
5037 | 5037 | elif len(parents) > 1: |
|
5038 | 5038 | t += _(' (merge)') |
|
5039 | 5039 | elif branch != parents[0].branch(): |
|
5040 | 5040 | t += _(' (new branch)') |
|
5041 | 5041 | elif (parents[0].closesbranch() and |
|
5042 | 5042 | pnode in repo.branchheads(branch, closed=True)): |
|
5043 | 5043 | t += _(' (head closed)') |
|
5044 | 5044 | elif not (status.modified or status.added or status.removed or renamed or |
|
5045 | 5045 | copied or subs): |
|
5046 | 5046 | t += _(' (clean)') |
|
5047 | 5047 | cleanworkdir = True |
|
5048 | 5048 | elif pnode not in bheads: |
|
5049 | 5049 | t += _(' (new branch head)') |
|
5050 | 5050 | |
|
5051 | 5051 | if parents: |
|
5052 | 5052 | pendingphase = max(p.phase() for p in parents) |
|
5053 | 5053 | else: |
|
5054 | 5054 | pendingphase = phases.public |
|
5055 | 5055 | |
|
5056 | 5056 | if pendingphase > phases.newcommitphase(ui): |
|
5057 | 5057 | t += ' (%s)' % phases.phasenames[pendingphase] |
|
5058 | 5058 | |
|
5059 | 5059 | if cleanworkdir: |
|
5060 | 5060 | # i18n: column positioning for "hg summary" |
|
5061 | 5061 | ui.status(_('commit: %s\n') % t.strip()) |
|
5062 | 5062 | else: |
|
5063 | 5063 | # i18n: column positioning for "hg summary" |
|
5064 | 5064 | ui.write(_('commit: %s\n') % t.strip()) |
|
5065 | 5065 | |
|
5066 | 5066 | # all ancestors of branch heads - all ancestors of parent = new csets |
|
5067 | 5067 | new = len(repo.changelog.findmissing([pctx.node() for pctx in parents], |
|
5068 | 5068 | bheads)) |
|
5069 | 5069 | |
|
5070 | 5070 | if new == 0: |
|
5071 | 5071 | # i18n: column positioning for "hg summary" |
|
5072 | 5072 | ui.status(_('update: (current)\n')) |
|
5073 | 5073 | elif pnode not in bheads: |
|
5074 | 5074 | # i18n: column positioning for "hg summary" |
|
5075 | 5075 | ui.write(_('update: %d new changesets (update)\n') % new) |
|
5076 | 5076 | else: |
|
5077 | 5077 | # i18n: column positioning for "hg summary" |
|
5078 | 5078 | ui.write(_('update: %d new changesets, %d branch heads (merge)\n') % |
|
5079 | 5079 | (new, len(bheads))) |
|
5080 | 5080 | |
|
5081 | 5081 | t = [] |
|
5082 | 5082 | draft = len(repo.revs('draft()')) |
|
5083 | 5083 | if draft: |
|
5084 | 5084 | t.append(_('%d draft') % draft) |
|
5085 | 5085 | secret = len(repo.revs('secret()')) |
|
5086 | 5086 | if secret: |
|
5087 | 5087 | t.append(_('%d secret') % secret) |
|
5088 | 5088 | |
|
5089 | 5089 | if draft or secret: |
|
5090 | 5090 | ui.status(_('phases: %s\n') % ', '.join(t)) |
|
5091 | 5091 | |
|
5092 | 5092 | if obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
5093 | 5093 | for trouble in ("orphan", "contentdivergent", "phasedivergent"): |
|
5094 | 5094 | numtrouble = len(repo.revs(trouble + "()")) |
|
5095 | 5095 | # We write all the possibilities to ease translation |
|
5096 | 5096 | troublemsg = { |
|
5097 | 5097 | "orphan": _("orphan: %d changesets"), |
|
5098 | 5098 | "contentdivergent": _("content-divergent: %d changesets"), |
|
5099 | 5099 | "phasedivergent": _("phase-divergent: %d changesets"), |
|
5100 | 5100 | } |
|
5101 | 5101 | if numtrouble > 0: |
|
5102 | 5102 | ui.status(troublemsg[trouble] % numtrouble + "\n") |
|
5103 | 5103 | |
|
5104 | 5104 | cmdutil.summaryhooks(ui, repo) |
|
5105 | 5105 | |
|
5106 | 5106 | if opts.get('remote'): |
|
5107 | 5107 | needsincoming, needsoutgoing = True, True |
|
5108 | 5108 | else: |
|
5109 | 5109 | needsincoming, needsoutgoing = False, False |
|
5110 | 5110 | for i, o in cmdutil.summaryremotehooks(ui, repo, opts, None): |
|
5111 | 5111 | if i: |
|
5112 | 5112 | needsincoming = True |
|
5113 | 5113 | if o: |
|
5114 | 5114 | needsoutgoing = True |
|
5115 | 5115 | if not needsincoming and not needsoutgoing: |
|
5116 | 5116 | return |
|
5117 | 5117 | |
|
5118 | 5118 | def getincoming(): |
|
5119 | 5119 | source, branches = hg.parseurl(ui.expandpath('default')) |
|
5120 | 5120 | sbranch = branches[0] |
|
5121 | 5121 | try: |
|
5122 | 5122 | other = hg.peer(repo, {}, source) |
|
5123 | 5123 | except error.RepoError: |
|
5124 | 5124 | if opts.get('remote'): |
|
5125 | 5125 | raise |
|
5126 | 5126 | return source, sbranch, None, None, None |
|
5127 | 5127 | revs, checkout = hg.addbranchrevs(repo, other, branches, None) |
|
5128 | 5128 | if revs: |
|
5129 | 5129 | revs = [other.lookup(rev) for rev in revs] |
|
5130 | 5130 | ui.debug('comparing with %s\n' % util.hidepassword(source)) |
|
5131 | 5131 | repo.ui.pushbuffer() |
|
5132 | 5132 | commoninc = discovery.findcommonincoming(repo, other, heads=revs) |
|
5133 | 5133 | repo.ui.popbuffer() |
|
5134 | 5134 | return source, sbranch, other, commoninc, commoninc[1] |
|
5135 | 5135 | |
|
5136 | 5136 | if needsincoming: |
|
5137 | 5137 | source, sbranch, sother, commoninc, incoming = getincoming() |
|
5138 | 5138 | else: |
|
5139 | 5139 | source = sbranch = sother = commoninc = incoming = None |
|
5140 | 5140 | |
|
5141 | 5141 | def getoutgoing(): |
|
5142 | 5142 | dest, branches = hg.parseurl(ui.expandpath('default-push', 'default')) |
|
5143 | 5143 | dbranch = branches[0] |
|
5144 | 5144 | revs, checkout = hg.addbranchrevs(repo, repo, branches, None) |
|
5145 | 5145 | if source != dest: |
|
5146 | 5146 | try: |
|
5147 | 5147 | dother = hg.peer(repo, {}, dest) |
|
5148 | 5148 | except error.RepoError: |
|
5149 | 5149 | if opts.get('remote'): |
|
5150 | 5150 | raise |
|
5151 | 5151 | return dest, dbranch, None, None |
|
5152 | 5152 | ui.debug('comparing with %s\n' % util.hidepassword(dest)) |
|
5153 | 5153 | elif sother is None: |
|
5154 | 5154 | # there is no explicit destination peer, but the source one is invalid
|
5155 | 5155 | return dest, dbranch, None, None |
|
5156 | 5156 | else: |
|
5157 | 5157 | dother = sother |
|
5158 | 5158 | if (source != dest or (sbranch is not None and sbranch != dbranch)): |
|
5159 | 5159 | common = None |
|
5160 | 5160 | else: |
|
5161 | 5161 | common = commoninc |
|
5162 | 5162 | if revs: |
|
5163 | 5163 | revs = [repo.lookup(rev) for rev in revs] |
|
5164 | 5164 | repo.ui.pushbuffer() |
|
5165 | 5165 | outgoing = discovery.findcommonoutgoing(repo, dother, onlyheads=revs, |
|
5166 | 5166 | commoninc=common) |
|
5167 | 5167 | repo.ui.popbuffer() |
|
5168 | 5168 | return dest, dbranch, dother, outgoing |
|
5169 | 5169 | |
|
5170 | 5170 | if needsoutgoing: |
|
5171 | 5171 | dest, dbranch, dother, outgoing = getoutgoing() |
|
5172 | 5172 | else: |
|
5173 | 5173 | dest = dbranch = dother = outgoing = None |
|
5174 | 5174 | |
|
5175 | 5175 | if opts.get('remote'): |
|
5176 | 5176 | t = [] |
|
5177 | 5177 | if incoming: |
|
5178 | 5178 | t.append(_('1 or more incoming')) |
|
5179 | 5179 | o = outgoing.missing |
|
5180 | 5180 | if o: |
|
5181 | 5181 | t.append(_('%d outgoing') % len(o)) |
|
5182 | 5182 | other = dother or sother |
|
5183 | 5183 | if 'bookmarks' in other.listkeys('namespaces'): |
|
5184 | 5184 | counts = bookmarks.summary(repo, other) |
|
5185 | 5185 | if counts[0] > 0: |
|
5186 | 5186 | t.append(_('%d incoming bookmarks') % counts[0]) |
|
5187 | 5187 | if counts[1] > 0: |
|
5188 | 5188 | t.append(_('%d outgoing bookmarks') % counts[1]) |
|
5189 | 5189 | |
|
5190 | 5190 | if t: |
|
5191 | 5191 | # i18n: column positioning for "hg summary" |
|
5192 | 5192 | ui.write(_('remote: %s\n') % (', '.join(t))) |
|
5193 | 5193 | else: |
|
5194 | 5194 | # i18n: column positioning for "hg summary" |
|
5195 | 5195 | ui.status(_('remote: (synced)\n')) |
|
5196 | 5196 | |
|
5197 | 5197 | cmdutil.summaryremotehooks(ui, repo, opts, |
|
5198 | 5198 | ((source, sbranch, sother, commoninc), |
|
5199 | 5199 | (dest, dbranch, dother, outgoing))) |
|
5200 | 5200 | |
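A hedged sketch of how the labels loop in summary() assembles the "commit:"
line; the tallies below are hypothetical::

    counts = [('modified', 2), ('added', 1), ('unknown', 0)]  # hypothetical
    t = ', '.join('%d %s' % (n, name) for name, n in counts if n)
    print('commit: %s' % t)   # -> commit: 2 modified, 1 added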
|
5201 | 5201 | @command('tag', |
|
5202 | 5202 | [('f', 'force', None, _('force tag')), |
|
5203 | 5203 | ('l', 'local', None, _('make the tag local')), |
|
5204 | 5204 | ('r', 'rev', '', _('revision to tag'), _('REV')), |
|
5205 | 5205 | ('', 'remove', None, _('remove a tag')), |
|
5206 | 5206 | # -l/--local is already there, commitopts cannot be used |
|
5207 | 5207 | ('e', 'edit', None, _('invoke editor on commit messages')), |
|
5208 | 5208 | ('m', 'message', '', _('use text as commit message'), _('TEXT')), |
|
5209 | 5209 | ] + commitopts2, |
|
5210 | 5210 | _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')) |
|
5211 | 5211 | def tag(ui, repo, name1, *names, **opts): |
|
5212 | 5212 | """add one or more tags for the current or given revision |
|
5213 | 5213 | |
|
5214 | 5214 | Name a particular revision using <name>. |
|
5215 | 5215 | |
|
5216 | 5216 | Tags are used to name particular revisions of the repository and are |
|
5217 | 5217 | very useful for comparing different revisions, going back to significant

5218 | 5218 | earlier versions, or marking branch points as releases. Changing
|
5219 | 5219 | an existing tag is normally disallowed; use -f/--force to override. |
|
5220 | 5220 | |
|
5221 | 5221 | If no revision is given, the parent of the working directory is |
|
5222 | 5222 | used. |
|
5223 | 5223 | |
|
5224 | 5224 | To facilitate version control, distribution, and merging of tags, |
|
5225 | 5225 | they are stored as a file named ".hgtags" which is managed similarly |
|
5226 | 5226 | to other project files and can be hand-edited if necessary. This |
|
5227 | 5227 | also means that tagging creates a new commit. The file |
|
5228 | 5228 | ".hg/localtags" is used for local tags (not shared among |
|
5229 | 5229 | repositories). |
|
5230 | 5230 | |
|
5231 | 5231 | Tag commits are usually made at the head of a branch. If the parent |
|
5232 | 5232 | of the working directory is not a branch head, :hg:`tag` aborts; use |
|
5233 | 5233 | -f/--force to force the tag commit to be based on a non-head |
|
5234 | 5234 | changeset. |
|
5235 | 5235 | |
|
5236 | 5236 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
5237 | 5237 | |
|
5238 | 5238 | Since tag names have priority over branch names during revision |
|
5239 | 5239 | lookup, using an existing branch name as a tag name is discouraged. |
|
5240 | 5240 | |
|
5241 | 5241 | Returns 0 on success. |
|
5242 | 5242 | """ |
|
5243 | 5243 | opts = pycompat.byteskwargs(opts) |
|
5244 | 5244 | wlock = lock = None |
|
5245 | 5245 | try: |
|
5246 | 5246 | wlock = repo.wlock() |
|
5247 | 5247 | lock = repo.lock() |
|
5248 | 5248 | rev_ = "." |
|
5249 | 5249 | names = [t.strip() for t in (name1,) + names] |
|
5250 | 5250 | if len(names) != len(set(names)): |
|
5251 | 5251 | raise error.Abort(_('tag names must be unique')) |
|
5252 | 5252 | for n in names: |
|
5253 | 5253 | scmutil.checknewlabel(repo, n, 'tag') |
|
5254 | 5254 | if not n: |
|
5255 | 5255 | raise error.Abort(_('tag names cannot consist entirely of ' |
|
5256 | 5256 | 'whitespace')) |
|
5257 | 5257 | if opts.get('rev') and opts.get('remove'): |
|
5258 | 5258 | raise error.Abort(_("--rev and --remove are incompatible")) |
|
5259 | 5259 | if opts.get('rev'): |
|
5260 | 5260 | rev_ = opts['rev'] |
|
5261 | 5261 | message = opts.get('message') |
|
5262 | 5262 | if opts.get('remove'): |
|
5263 | 5263 | if opts.get('local'): |
|
5264 | 5264 | expectedtype = 'local' |
|
5265 | 5265 | else: |
|
5266 | 5266 | expectedtype = 'global' |
|
5267 | 5267 | |
|
5268 | 5268 | for n in names: |
|
5269 | 5269 | if not repo.tagtype(n): |
|
5270 | 5270 | raise error.Abort(_("tag '%s' does not exist") % n) |
|
5271 | 5271 | if repo.tagtype(n) != expectedtype: |
|
5272 | 5272 | if expectedtype == 'global': |
|
5273 | 5273 | raise error.Abort(_("tag '%s' is not a global tag") % n) |
|
5274 | 5274 | else: |
|
5275 | 5275 | raise error.Abort(_("tag '%s' is not a local tag") % n) |
|
5276 | 5276 | rev_ = 'null' |
|
5277 | 5277 | if not message: |
|
5278 | 5278 | # we don't translate commit messages |
|
5279 | 5279 | message = 'Removed tag %s' % ', '.join(names) |
|
5280 | 5280 | elif not opts.get('force'): |
|
5281 | 5281 | for n in names: |
|
5282 | 5282 | if n in repo.tags(): |
|
5283 | 5283 | raise error.Abort(_("tag '%s' already exists " |
|
5284 | 5284 | "(use -f to force)") % n) |
|
5285 | 5285 | if not opts.get('local'): |
|
5286 | 5286 | p1, p2 = repo.dirstate.parents() |
|
5287 | 5287 | if p2 != nullid: |
|
5288 | 5288 | raise error.Abort(_('uncommitted merge')) |
|
5289 | 5289 | bheads = repo.branchheads() |
|
5290 | 5290 | if not opts.get('force') and bheads and p1 not in bheads: |
|
5291 | 5291 | raise error.Abort(_('working directory is not at a branch head ' |
|
5292 | 5292 | '(use -f to force)')) |
|
5293 | 5293 | r = scmutil.revsingle(repo, rev_).node() |
|
5294 | 5294 | |
|
5295 | 5295 | if not message: |
|
5296 | 5296 | # we don't translate commit messages |
|
5297 | 5297 | message = ('Added tag %s for changeset %s' % |
|
5298 | 5298 | (', '.join(names), short(r))) |
|
5299 | 5299 | |
|
5300 | 5300 | date = opts.get('date') |
|
5301 | 5301 | if date: |
|
5302 | 5302 | date = util.parsedate(date) |
|
5303 | 5303 | |
|
5304 | 5304 | if opts.get('remove'): |
|
5305 | 5305 | editform = 'tag.remove' |
|
5306 | 5306 | else: |
|
5307 | 5307 | editform = 'tag.add' |
|
5308 | 5308 | editor = cmdutil.getcommiteditor(editform=editform, |
|
5309 | 5309 | **pycompat.strkwargs(opts)) |
|
5310 | 5310 | |
|
5311 | 5311 | # don't allow tagging the null rev |
|
5312 | 5312 | if (not opts.get('remove') and |
|
5313 | 5313 | scmutil.revsingle(repo, rev_).rev() == nullrev): |
|
5314 | 5314 | raise error.Abort(_("cannot tag null revision")) |
|
5315 | 5315 | |
|
5316 | 5316 | tagsmod.tag(repo, names, r, message, opts.get('local'), |
|
5317 | 5317 | opts.get('user'), date, editor=editor) |
|
5318 | 5318 | finally: |
|
5319 | 5319 | release(lock, wlock) |
|
5320 | 5320 | |
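The tag() flow above is easiest to see from the command line; a hedged
illustration (revision number and tag names are made up)::

    hg tag -r 3 v1.0         # commits "Added tag v1.0 for changeset ..."
    hg tag --remove v1.0     # commits "Removed tag v1.0"
    hg tag -l snapshot       # local tag, written only to .hg/localtags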
|
5321 | 5321 | @command('tags', formatteropts, '', cmdtype=readonly) |
|
5322 | 5322 | def tags(ui, repo, **opts): |
|
5323 | 5323 | """list repository tags |
|
5324 | 5324 | |
|
5325 | 5325 | This lists both regular and local tags. When the -v/--verbose |
|
5326 | 5326 | switch is used, a third column "local" is printed for local tags. |
|
5327 | 5327 | When the -q/--quiet switch is used, only the tag name is printed. |
|
5328 | 5328 | |
|
5329 | 5329 | Returns 0 on success. |
|
5330 | 5330 | """ |
|
5331 | 5331 | |
|
5332 | 5332 | opts = pycompat.byteskwargs(opts) |
|
5333 | 5333 | ui.pager('tags') |
|
5334 | 5334 | fm = ui.formatter('tags', opts) |
|
5335 | 5335 | hexfunc = fm.hexfunc |
|
5336 | 5336 | tagtype = "" |
|
5337 | 5337 | |
|
5338 | 5338 | for t, n in reversed(repo.tagslist()): |
|
5339 | 5339 | hn = hexfunc(n) |
|
5340 | 5340 | label = 'tags.normal' |
|
5341 | 5341 | tagtype = '' |
|
5342 | 5342 | if repo.tagtype(t) == 'local': |
|
5343 | 5343 | label = 'tags.local' |
|
5344 | 5344 | tagtype = 'local' |
|
5345 | 5345 | |
|
5346 | 5346 | fm.startitem() |
|
5347 | 5347 | fm.write('tag', '%s', t, label=label) |
|
5348 | 5348 | fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s' |
|
5349 | 5349 | fm.condwrite(not ui.quiet, 'rev node', fmt, |
|
5350 | 5350 | repo.changelog.rev(n), hn, label=label) |
|
5351 | 5351 | fm.condwrite(ui.verbose and tagtype, 'type', ' %s', |
|
5352 | 5352 | tagtype, label=label) |
|
5353 | 5353 | fm.plain('\n') |
|
5354 | 5354 | fm.end() |
|
5355 | 5355 | |
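Given the 30-column padding computed above, tags() output looks roughly like
this (tag names and hashes are hypothetical)::

    tip                               42:a1b2c3d4e5f6
    v1.0                               3:0123456789ab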
|
5356 | 5356 | @command('tip', |
|
5357 | 5357 | [('p', 'patch', None, _('show patch')), |
|
5358 | 5358 | ('g', 'git', None, _('use git extended diff format')), |
|
5359 | 5359 | ] + templateopts, |
|
5360 | 5360 | _('[-p] [-g]')) |
|
5361 | 5361 | def tip(ui, repo, **opts): |
|
5362 | 5362 | """show the tip revision (DEPRECATED) |
|
5363 | 5363 | |
|
5364 | 5364 | The tip revision (usually just called the tip) is the changeset |
|
5365 | 5365 | most recently added to the repository (and therefore the most |
|
5366 | 5366 | recently changed head). |
|
5367 | 5367 | |
|
5368 | 5368 | If you have just made a commit, that commit will be the tip. If |
|
5369 | 5369 | you have just pulled changes from another repository, the tip of |
|
5370 | 5370 | that repository becomes the current tip. The "tip" tag is special |
|
5371 | 5371 | and cannot be renamed or assigned to a different changeset. |
|
5372 | 5372 | |
|
5373 | 5373 | This command is deprecated; please use :hg:`heads` instead.
|
5374 | 5374 | |
|
5375 | 5375 | Returns 0 on success. |
|
5376 | 5376 | """ |
|
5377 | 5377 | opts = pycompat.byteskwargs(opts) |
|
5378 | 5378 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
5379 | 5379 | displayer.show(repo['tip']) |
|
5380 | 5380 | displayer.close() |
|
5381 | 5381 | |
|
5382 | 5382 | @command('unbundle', |
|
5383 | 5383 | [('u', 'update', None, |
|
5384 | 5384 | _('update to new branch head if changesets were unbundled'))], |
|
5385 | 5385 | _('[-u] FILE...')) |
|
5386 | 5386 | def unbundle(ui, repo, fname1, *fnames, **opts): |
|
5387 | 5387 | """apply one or more bundle files |
|
5388 | 5388 | |
|
5389 | 5389 | Apply one or more bundle files generated by :hg:`bundle`. |
|
5390 | 5390 | |
|
5391 | 5391 | Returns 0 on success, 1 if an update has unresolved files. |
|
5392 | 5392 | """ |
|
5393 | 5393 | fnames = (fname1,) + fnames |
|
5394 | 5394 | |
|
5395 | 5395 | with repo.lock(): |
|
5396 | 5396 | for fname in fnames: |
|
5397 | 5397 | f = hg.openpath(ui, fname) |
|
5398 | 5398 | gen = exchange.readbundle(ui, f, fname) |
|
5399 | 5399 | if isinstance(gen, streamclone.streamcloneapplier): |
|
5400 | 5400 | raise error.Abort( |
|
5401 | 5401 | _('packed bundles cannot be applied with ' |
|
5402 | 5402 | '"hg unbundle"'), |
|
5403 | 5403 | hint=_('use "hg debugapplystreamclonebundle"')) |
|
5404 | 5404 | url = 'bundle:' + fname |
|
5405 | 5405 | try: |
|
5406 | 5406 | txnname = 'unbundle' |
|
5407 | 5407 | if not isinstance(gen, bundle2.unbundle20): |
|
5408 | 5408 | txnname = 'unbundle\n%s' % util.hidepassword(url) |
|
5409 | 5409 | with repo.transaction(txnname) as tr: |
|
5410 | 5410 | op = bundle2.applybundle(repo, gen, tr, source='unbundle', |
|
5411 | 5411 | url=url) |
|
5412 | 5412 | except error.BundleUnknownFeatureError as exc: |
|
5413 | 5413 | raise error.Abort( |
|
5414 | 5414 | _('%s: unknown bundle feature, %s') % (fname, exc), |
|
5415 | 5415 | hint=_("see https://mercurial-scm.org/" |
|
5416 | 5416 | "wiki/BundleFeature for more " |
|
5417 | 5417 | "information")) |
|
5418 | 5418 | modheads = bundle2.combinechangegroupresults(op) |
|
5419 | 5419 | |
|
5420 | 5420 | return postincoming(ui, repo, modheads, opts.get(r'update'), None, None) |
|
5421 | 5421 | |
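A hedged round trip showing where unbundle() fits (the file name is
arbitrary)::

    hg bundle --all changes.hg    # write all changesets to a bundle file
    hg unbundle -u changes.hg     # apply it elsewhere; -u updates afterwards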
|
5422 | 5422 | @command('^update|up|checkout|co', |
|
5423 | 5423 | [('C', 'clean', None, _('discard uncommitted changes (no backup)')), |
|
5424 | 5424 | ('c', 'check', None, _('require clean working directory')), |
|
5425 | 5425 | ('m', 'merge', None, _('merge uncommitted changes')), |
|
5426 | 5426 | ('d', 'date', '', _('tipmost revision matching date'), _('DATE')), |
|
5427 | 5427 | ('r', 'rev', '', _('revision'), _('REV')) |
|
5428 | 5428 | ] + mergetoolopts, |
|
5429 | 5429 | _('[-C|-c|-m] [-d DATE] [[-r] REV]')) |
|
5430 | 5430 | def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False, |
|
5431 | 5431 | merge=None, tool=None): |
|
5432 | 5432 | """update working directory (or switch revisions) |
|
5433 | 5433 | |
|
5434 | 5434 | Update the repository's working directory to the specified |
|
5435 | 5435 | changeset. If no changeset is specified, update to the tip of the |
|
5436 | 5436 | current named branch and move the active bookmark (see :hg:`help |
|
5437 | 5437 | bookmarks`). |
|
5438 | 5438 | |
|
5439 | 5439 | Update sets the working directory's parent revision to the specified |
|
5440 | 5440 | changeset (see :hg:`help parents`). |
|
5441 | 5441 | |
|
5442 | 5442 | If the changeset is not a descendant or ancestor of the working |
|
5443 | 5443 | directory's parent and there are uncommitted changes, the update is |
|
5444 | 5444 | aborted. With the -c/--check option, the working directory is checked |
|
5445 | 5445 | for uncommitted changes; if none are found, the working directory is |
|
5446 | 5446 | updated to the specified changeset. |
|
5447 | 5447 | |
|
5448 | 5448 | .. container:: verbose |
|
5449 | 5449 | |
|
5450 | 5450 | The -C/--clean, -c/--check, and -m/--merge options control what |
|
5451 | 5451 | happens if the working directory contains uncommitted changes. |
|
5452 | 5452 | At most one of them can be specified.
|
5453 | 5453 | |
|
5454 | 5454 | 1. If no option is specified, and if |
|
5455 | 5455 | the requested changeset is an ancestor or descendant of |
|
5456 | 5456 | the working directory's parent, the uncommitted changes |
|
5457 | 5457 | are merged into the requested changeset and the merged |
|
5458 | 5458 | result is left uncommitted. If the requested changeset is |
|
5459 | 5459 | not an ancestor or descendant (that is, it is on another |
|
5460 | 5460 | branch), the update is aborted and the uncommitted changes |
|
5461 | 5461 | are preserved. |
|
5462 | 5462 | |
|
5463 | 5463 | 2. With the -m/--merge option, the update is allowed even if the |
|
5464 | 5464 | requested changeset is not an ancestor or descendant of |
|
5465 | 5465 | the working directory's parent. |
|
5466 | 5466 | |
|
5467 | 5467 | 3. With the -c/--check option, the update is aborted and the |
|
5468 | 5468 | uncommitted changes are preserved. |
|
5469 | 5469 | |
|
5470 | 5470 | 4. With the -C/--clean option, uncommitted changes are discarded and |
|
5471 | 5471 | the working directory is updated to the requested changeset. |
|
5472 | 5472 | |
|
5473 | 5473 | To cancel an uncommitted merge (and lose your changes), use |
|
5474 | 5474 | :hg:`merge --abort`. |
|
5475 | 5475 | |
|
5476 | 5476 | Use null as the changeset to remove the working directory (like |
|
5477 | 5477 | :hg:`clone -U`). |
|
5478 | 5478 | |
|
5479 | 5479 | If you want to revert just one file to an older revision, use |
|
5480 | 5480 | :hg:`revert [-r REV] NAME`. |
|
5481 | 5481 | |
|
5482 | 5482 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
5483 | 5483 | |
|
5484 | 5484 | Returns 0 on success, 1 if there are unresolved files. |
|
5485 | 5485 | """ |
|
5486 | 5486 | if rev and node: |
|
5487 | 5487 | raise error.Abort(_("please specify just one revision")) |
|
5488 | 5488 | |
|
5489 | 5489 | if ui.configbool('commands', 'update.requiredest'): |
|
5490 | 5490 | if not node and not rev and not date: |
|
5491 | 5491 | raise error.Abort(_('you must specify a destination'), |
|
5492 | 5492 | hint=_('for example: hg update ".::"')) |
|
5493 | 5493 | |
|
5494 | 5494 | if rev is None or rev == '': |
|
5495 | 5495 | rev = node |
|
5496 | 5496 | |
|
5497 | 5497 | if date and rev is not None: |
|
5498 | 5498 | raise error.Abort(_("you can't specify a revision and a date")) |
|
5499 | 5499 | |
|
5500 | 5500 | if len([x for x in (clean, check, merge) if x]) > 1: |
|
5501 | 5501 | raise error.Abort(_("can only specify one of -C/--clean, -c/--check, " |
|
5502 | 5502 | "or -m/--merge")) |
|
5503 | 5503 | |
|
5504 | 5504 | updatecheck = None |
|
5505 | 5505 | if check: |
|
5506 | 5506 | updatecheck = 'abort' |
|
5507 | 5507 | elif merge: |
|
5508 | 5508 | updatecheck = 'none' |
|
5509 | 5509 | |
|
5510 | 5510 | with repo.wlock(): |
|
5511 | 5511 | cmdutil.clearunfinished(repo) |
|
5512 | 5512 | |
|
5513 | 5513 | if date: |
|
5514 | 5514 | rev = cmdutil.finddate(ui, repo, date) |
|
5515 | 5515 | |
|
5516 | 5516 | # if we defined a bookmark, we have to remember the original name |
|
5517 | 5517 | brev = rev |
|
5518 | 5518 | if rev: |
|
5519 | 5519 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
5520 | 5520 | ctx = scmutil.revsingle(repo, rev, rev) |
|
5521 | 5521 | rev = ctx.rev() |
|
5522 | 5522 | if ctx.hidden(): |
|
5523 | 5523 | ctxstr = ctx.hex()[:12] |
|
5524 | 5524 | ui.warn(_("updating to a hidden changeset %s\n") % ctxstr) |
|
5525 | 5525 | |
|
5526 | 5526 | if ctx.obsolete(): |
|
5527 | 5527 | obsfatemsg = obsutil._getfilteredreason(repo, ctxstr, ctx) |
|
5528 | 5528 | ui.warn("(%s)\n" % obsfatemsg) |
|
5529 | 5529 | |
|
5530 | 5530 | repo.ui.setconfig('ui', 'forcemerge', tool, 'update') |
|
5531 | 5531 | |
|
5532 | 5532 | return hg.updatetotally(ui, repo, rev, brev, clean=clean, |
|
5533 | 5533 | updatecheck=updatecheck) |
|
5534 | 5534 | |
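With uncommitted changes present, the three mutually exclusive options above
behave as follows (hedged command-line illustration)::

    hg update -c default   # abort; the working directory must be clean
    hg update -m default   # proceed, merging the uncommitted changes
    hg update -C default   # proceed, discarding the uncommitted changes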
|
5535 | 5535 | @command('verify', []) |
|
5536 | 5536 | def verify(ui, repo): |
|
5537 | 5537 | """verify the integrity of the repository |
|
5538 | 5538 | |
|
5539 | 5539 | Verify the integrity of the current repository. |
|
5540 | 5540 | |
|
5541 | 5541 | This will perform an extensive check of the repository's |
|
5542 | 5542 | integrity, validating the hashes and checksums of each entry in |
|
5543 | 5543 | the changelog, manifest, and tracked files, as well as the |
|
5544 | 5544 | integrity of their crosslinks and indices. |
|
5545 | 5545 | |
|
5546 | 5546 | Please see https://mercurial-scm.org/wiki/RepositoryCorruption |
|
5547 | 5547 | for more information about recovery from corruption of the |
|
5548 | 5548 | repository. |
|
5549 | 5549 | |
|
5550 | 5550 | Returns 0 on success, 1 if errors are encountered. |
|
5551 | 5551 | """ |
|
5552 | 5552 | return hg.verify(repo) |
|
5553 | 5553 | |
|
5554 | 5554 | @command('version', [] + formatteropts, norepo=True, cmdtype=readonly) |
|
5555 | 5555 | def version_(ui, **opts): |
|
5556 | 5556 | """output version and copyright information""" |
|
5557 | 5557 | opts = pycompat.byteskwargs(opts) |
|
5558 | 5558 | if ui.verbose: |
|
5559 | 5559 | ui.pager('version') |
|
5560 | 5560 | fm = ui.formatter("version", opts) |
|
5561 | 5561 | fm.startitem() |
|
5562 | 5562 | fm.write("ver", _("Mercurial Distributed SCM (version %s)\n"), |
|
5563 | 5563 | util.version()) |
|
5564 | 5564 | license = _( |
|
5565 | 5565 | "(see https://mercurial-scm.org for more information)\n" |
|
5566 | 5566 | "\nCopyright (C) 2005-2018 Matt Mackall and others\n" |
|
5567 | 5567 | "This is free software; see the source for copying conditions. " |
|
5568 | 5568 | "There is NO\nwarranty; " |
|
5569 | 5569 | "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n" |
|
5570 | 5570 | ) |
|
5571 | 5571 | if not ui.quiet: |
|
5572 | 5572 | fm.plain(license) |
|
5573 | 5573 | |
|
5574 | 5574 | if ui.verbose: |
|
5575 | 5575 | fm.plain(_("\nEnabled extensions:\n\n")) |
|
5576 | 5576 | # format names and versions into columns |
|
5577 | 5577 | names = [] |
|
5578 | 5578 | vers = [] |
|
5579 | 5579 | isinternals = [] |
|
5580 | 5580 | for name, module in extensions.extensions(): |
|
5581 | 5581 | names.append(name) |
|
5582 | 5582 | vers.append(extensions.moduleversion(module) or None) |
|
5583 | 5583 | isinternals.append(extensions.ismoduleinternal(module)) |
|
5584 | 5584 | fn = fm.nested("extensions") |
|
5585 | 5585 | if names: |
|
5586 | 5586 | namefmt = " %%-%ds " % max(len(n) for n in names) |
|
5587 | 5587 | places = [_("external"), _("internal")] |
|
5588 | 5588 | for n, v, p in zip(names, vers, isinternals): |
|
5589 | 5589 | fn.startitem() |
|
5590 | 5590 | fn.condwrite(ui.verbose, "name", namefmt, n) |
|
5591 | 5591 | if ui.verbose: |
|
5592 | 5592 | fn.plain("%s " % places[p]) |
|
5593 | 5593 | fn.data(bundled=p) |
|
5594 | 5594 | fn.condwrite(ui.verbose and v, "ver", "%s", v) |
|
5595 | 5595 | if ui.verbose: |
|
5596 | 5596 | fn.plain("\n") |
|
5597 | 5597 | fn.end() |
|
5598 | 5598 | fm.end() |
|
5599 | 5599 | |
|
5600 | 5600 | def loadcmdtable(ui, name, cmdtable): |
|
5601 | 5601 | """Load command functions from specified cmdtable |
|
5602 | 5602 | """ |
|
5603 | 5603 | overrides = [cmd for cmd in cmdtable if cmd in table] |
|
5604 | 5604 | if overrides: |
|
5605 | 5605 | ui.warn(_("extension '%s' overrides commands: %s\n") |
|
5606 | 5606 | % (name, " ".join(overrides))) |
|
5607 | 5607 | table.update(cmdtable) |
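To illustrate what loadcmdtable() receives, here is a minimal, hypothetical
extension command table in the standard registrar style (the command and
extension names are invented)::

    from mercurial import registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command('hello', [], 'hg hello')
    def hello(ui, repo):
        """print a greeting (hypothetical example command)"""
        ui.write('hello from an extension\n')

    # On load, loadcmdtable(ui, 'hello-ext', cmdtable) merges this into the
    # global table, warning if 'hello' overrides an existing command.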
@@ -1,2261 +1,2263 b'' | |||
|
1 | 1 | # exchange.py - utility to exchange data between repos. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import errno |
|
12 | 12 | import hashlib |
|
13 | 13 | |
|
14 | 14 | from .i18n import _ |
|
15 | 15 | from .node import ( |
|
16 | 16 | bin, |
|
17 | 17 | hex, |
|
18 | 18 | nullid, |
|
19 | 19 | ) |
|
20 | 20 | from . import ( |
|
21 | 21 | bookmarks as bookmod, |
|
22 | 22 | bundle2, |
|
23 | 23 | changegroup, |
|
24 | 24 | discovery, |
|
25 | 25 | error, |
|
26 | 26 | lock as lockmod, |
|
27 | 27 | logexchange, |
|
28 | 28 | obsolete, |
|
29 | 29 | phases, |
|
30 | 30 | pushkey, |
|
31 | 31 | pycompat, |
|
32 | 32 | scmutil, |
|
33 | 33 | sslutil, |
|
34 | 34 | streamclone, |
|
35 | 35 | url as urlmod, |
|
36 | 36 | util, |
|
37 | 37 | ) |
|
38 | 38 | |
|
39 | 39 | urlerr = util.urlerr |
|
40 | 40 | urlreq = util.urlreq |
|
41 | 41 | |
|
42 | 42 | # Maps bundle version human names to changegroup versions. |
|
43 | 43 | _bundlespeccgversions = {'v1': '01', |
|
44 | 44 | 'v2': '02', |
|
45 | 45 | 'packed1': 's1', |
|
46 | 46 | 'bundle2': '02', #legacy |
|
47 | 47 | } |
|
48 | 48 | |
|
49 | 49 | # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE. |
|
50 | 50 | _bundlespecv1compengines = {'gzip', 'bzip2', 'none'} |
|
51 | 51 | |
|
52 | 52 | def parsebundlespec(repo, spec, strict=True, externalnames=False): |
|
53 | 53 | """Parse a bundle string specification into parts. |
|
54 | 54 | |
|
55 | 55 | Bundle specifications denote a well-defined bundle/exchange format. |
|
56 | 56 | The content of a given specification should not change over time in |
|
57 | 57 | order to ensure that bundles produced by a newer version of Mercurial are |
|
58 | 58 | readable from an older version. |
|
59 | 59 | |
|
60 | 60 | The string currently has the form: |
|
61 | 61 | |
|
62 | 62 | <compression>-<type>[;<parameter0>[;<parameter1>]] |
|
63 | 63 | |
|
64 | 64 | Where <compression> is one of the supported compression formats |
|
65 | 65 | and <type> is (currently) a version string. A ";" can follow the type and |
|
66 | 66 | all text afterwards is interpreted as URI encoded, ";" delimited key=value |
|
67 | 67 | pairs. |
|
68 | 68 | |
|
69 | 69 | If ``strict`` is True (the default) <compression> is required. Otherwise, |
|
70 | 70 | it is optional. |
|
71 | 71 | |
|
72 | 72 | If ``externalnames`` is False (the default), the human-centric names will |
|
73 | 73 | be converted to their internal representation. |
|
74 | 74 | |
|
75 | 75 | Returns a 3-tuple of (compression, version, parameters). Compression will |
|
76 | 76 | be ``None`` if not in strict mode and a compression isn't defined. |
|
77 | 77 | |
|
78 | 78 | An ``InvalidBundleSpecification`` is raised when the specification is |
|
79 | 79 | not syntactically well formed. |
|
80 | 80 | |
|
81 | 81 | An ``UnsupportedBundleSpecification`` is raised when the compression or |
|
82 | 82 | bundle type/version is not recognized. |
|
83 | 83 | |
|
84 | 84 | Note: this function will likely eventually return a more complex data |
|
85 | 85 | structure, including bundle2 part information. |
|
86 | 86 | """ |
|
87 | 87 | def parseparams(s): |
|
88 | 88 | if ';' not in s: |
|
89 | 89 | return s, {} |
|
90 | 90 | |
|
91 | 91 | params = {} |
|
92 | 92 | version, paramstr = s.split(';', 1) |
|
93 | 93 | |
|
94 | 94 | for p in paramstr.split(';'): |
|
95 | 95 | if '=' not in p: |
|
96 | 96 | raise error.InvalidBundleSpecification( |
|
97 | 97 | _('invalid bundle specification: ' |
|
98 | 98 | 'missing "=" in parameter: %s') % p) |
|
99 | 99 | |
|
100 | 100 | key, value = p.split('=', 1) |
|
101 | 101 | key = urlreq.unquote(key) |
|
102 | 102 | value = urlreq.unquote(value) |
|
103 | 103 | params[key] = value |
|
104 | 104 | |
|
105 | 105 | return version, params |
|
106 | 106 | |
|
107 | 107 | |
|
108 | 108 | if strict and '-' not in spec: |
|
109 | 109 | raise error.InvalidBundleSpecification( |
|
110 | 110 | _('invalid bundle specification; ' |
|
111 | 111 | 'must be prefixed with compression: %s') % spec) |
|
112 | 112 | |
|
113 | 113 | if '-' in spec: |
|
114 | 114 | compression, version = spec.split('-', 1) |
|
115 | 115 | |
|
116 | 116 | if compression not in util.compengines.supportedbundlenames: |
|
117 | 117 | raise error.UnsupportedBundleSpecification( |
|
118 | 118 | _('%s compression is not supported') % compression) |
|
119 | 119 | |
|
120 | 120 | version, params = parseparams(version) |
|
121 | 121 | |
|
122 | 122 | if version not in _bundlespeccgversions: |
|
123 | 123 | raise error.UnsupportedBundleSpecification( |
|
124 | 124 | _('%s is not a recognized bundle version') % version) |
|
125 | 125 | else: |
|
126 | 126 | # Value could be just the compression or just the version, in which |
|
127 | 127 | # case some defaults are assumed (but only when not in strict mode). |
|
128 | 128 | assert not strict |
|
129 | 129 | |
|
130 | 130 | spec, params = parseparams(spec) |
|
131 | 131 | |
|
132 | 132 | if spec in util.compengines.supportedbundlenames: |
|
133 | 133 | compression = spec |
|
134 | 134 | version = 'v1' |
|
135 | 135 | # Generaldelta repos require v2. |
|
136 | 136 | if 'generaldelta' in repo.requirements: |
|
137 | 137 | version = 'v2' |
|
138 | 138 | # Modern compression engines require v2. |
|
139 | 139 | if compression not in _bundlespecv1compengines: |
|
140 | 140 | version = 'v2' |
|
141 | 141 | elif spec in _bundlespeccgversions: |
|
142 | 142 | if spec == 'packed1': |
|
143 | 143 | compression = 'none' |
|
144 | 144 | else: |
|
145 | 145 | compression = 'bzip2' |
|
146 | 146 | version = spec |
|
147 | 147 | else: |
|
148 | 148 | raise error.UnsupportedBundleSpecification( |
|
149 | 149 | _('%s is not a recognized bundle specification') % spec) |
|
150 | 150 | |
|
151 | 151 | # Bundle version 1 only supports a known set of compression engines. |
|
152 | 152 | if version == 'v1' and compression not in _bundlespecv1compengines: |
|
153 | 153 | raise error.UnsupportedBundleSpecification( |
|
154 | 154 | _('compression engine %s is not supported on v1 bundles') % |
|
155 | 155 | compression) |
|
156 | 156 | |
|
157 | 157 | # The specification for packed1 can optionally declare the data formats |
|
158 | 158 | # required to apply it. If we see this metadata, compare against what the |
|
159 | 159 | # repo supports and error if the bundle isn't compatible. |
|
160 | 160 | if version == 'packed1' and 'requirements' in params: |
|
161 | 161 | requirements = set(params['requirements'].split(',')) |
|
162 | 162 | missingreqs = requirements - repo.supportedformats |
|
163 | 163 | if missingreqs: |
|
164 | 164 | raise error.UnsupportedBundleSpecification( |
|
165 | 165 | _('missing support for repository features: %s') % |
|
166 | 166 | ', '.join(sorted(missingreqs))) |
|
167 | 167 | |
|
168 | 168 | if not externalnames: |
|
169 | 169 | engine = util.compengines.forbundlename(compression) |
|
170 | 170 | compression = engine.bundletype()[1] |
|
171 | 171 | version = _bundlespeccgversions[version] |
|
172 | 172 | return compression, version, params |
|
173 | 173 | |
|
174 | 174 | def readbundle(ui, fh, fname, vfs=None): |
|
175 | 175 | header = changegroup.readexactly(fh, 4) |
|
176 | 176 | |
|
177 | 177 | alg = None |
|
178 | 178 | if not fname: |
|
179 | 179 | fname = "stream" |
|
180 | 180 | if not header.startswith('HG') and header.startswith('\0'): |
|
181 | 181 | fh = changegroup.headerlessfixup(fh, header) |
|
182 | 182 | header = "HG10" |
|
183 | 183 | alg = 'UN' |
|
184 | 184 | elif vfs: |
|
185 | 185 | fname = vfs.join(fname) |
|
186 | 186 | |
|
187 | 187 | magic, version = header[0:2], header[2:4] |
|
188 | 188 | |
|
189 | 189 | if magic != 'HG': |
|
190 | 190 | raise error.Abort(_('%s: not a Mercurial bundle') % fname) |
|
191 | 191 | if version == '10': |
|
192 | 192 | if alg is None: |
|
193 | 193 | alg = changegroup.readexactly(fh, 2) |
|
194 | 194 | return changegroup.cg1unpacker(fh, alg) |
|
195 | 195 | elif version.startswith('2'): |
|
196 | 196 | return bundle2.getunbundler(ui, fh, magicstring=magic + version) |
|
197 | 197 | elif version == 'S1': |
|
198 | 198 | return streamclone.streamcloneapplier(fh) |
|
199 | 199 | else: |
|
200 | 200 | raise error.Abort(_('%s: unknown bundle version %s') % (fname, version)) |
|
201 | 201 | |
|
202 | 202 | def _formatrequirementsspec(requirements): |
|
203 | 203 | return urlreq.quote(','.join(sorted(requirements))) |
|
204 | 204 | |
|
205 | 205 | def _formatrequirementsparams(requirements): |
|
206 | 206 | requirements = _formatrequirementsspec(requirements) |
|
207 | 207 | params = "%s%s" % (urlreq.quote("requirements="), requirements) |
|
208 | 208 | return params |
|
209 | 209 | |
|
210 | 210 | def getbundlespec(ui, fh): |
|
211 | 211 | """Infer the bundlespec from a bundle file handle. |
|
212 | 212 | |
|
213 | 213 | The input file handle is seeked and the original seek position is not |
|
214 | 214 | restored. |
|
215 | 215 | """ |
|
216 | 216 | def speccompression(alg): |
|
217 | 217 | try: |
|
218 | 218 | return util.compengines.forbundletype(alg).bundletype()[0] |
|
219 | 219 | except KeyError: |
|
220 | 220 | return None |
|
221 | 221 | |
|
222 | 222 | b = readbundle(ui, fh, None) |
|
223 | 223 | if isinstance(b, changegroup.cg1unpacker): |
|
224 | 224 | alg = b._type |
|
225 | 225 | if alg == '_truncatedBZ': |
|
226 | 226 | alg = 'BZ' |
|
227 | 227 | comp = speccompression(alg) |
|
228 | 228 | if not comp: |
|
229 | 229 | raise error.Abort(_('unknown compression algorithm: %s') % alg) |
|
230 | 230 | return '%s-v1' % comp |
|
231 | 231 | elif isinstance(b, bundle2.unbundle20): |
|
232 | 232 | if 'Compression' in b.params: |
|
233 | 233 | comp = speccompression(b.params['Compression']) |
|
234 | 234 | if not comp: |
|
235 | 235 | raise error.Abort(_('unknown compression algorithm: %s') % comp) |
|
236 | 236 | else: |
|
237 | 237 | comp = 'none' |
|
238 | 238 | |
|
239 | 239 | version = None |
|
240 | 240 | for part in b.iterparts(): |
|
241 | 241 | if part.type == 'changegroup': |
|
242 | 242 | version = part.params['version'] |
|
243 | 243 | if version in ('01', '02'): |
|
244 | 244 | version = 'v2' |
|
245 | 245 | else: |
|
246 | 246 | raise error.Abort(_('changegroup version %s does not have ' |
|
247 | 247 | 'a known bundlespec') % version, |
|
248 | 248 | hint=_('try upgrading your Mercurial ' |
|
249 | 249 | 'client')) |
|
250 | 250 | |
|
251 | 251 | if not version: |
|
252 | 252 | raise error.Abort(_('could not identify changegroup version in ' |
|
253 | 253 | 'bundle')) |
|
254 | 254 | |
|
255 | 255 | return '%s-%s' % (comp, version) |
|
256 | 256 | elif isinstance(b, streamclone.streamcloneapplier): |
|
257 | 257 | requirements = streamclone.readbundle1header(fh)[2] |
|
258 | 258 | return 'none-packed1;%s' % _formatrequirementsparams(requirements) |
|
259 | 259 | else: |
|
260 | 260 | raise error.Abort(_('unknown bundle type: %s') % b) |
|
261 | 261 | |
|
262 | 262 | def _computeoutgoing(repo, heads, common): |
|
263 | 263 | """Computes which revs are outgoing given a set of common |
|
264 | 264 | and a set of heads. |
|
265 | 265 | |
|
266 | 266 | This is a separate function so extensions can have access to |
|
267 | 267 | the logic. |
|
268 | 268 | |
|
269 | 269 | Returns a discovery.outgoing object. |
|
270 | 270 | """ |
|
271 | 271 | cl = repo.changelog |
|
272 | 272 | if common: |
|
273 | 273 | hasnode = cl.hasnode |
|
274 | 274 | common = [n for n in common if hasnode(n)] |
|
275 | 275 | else: |
|
276 | 276 | common = [nullid] |
|
277 | 277 | if not heads: |
|
278 | 278 | heads = cl.heads() |
|
279 | 279 | return discovery.outgoing(repo, common, heads) |
|
280 | 280 | |
|
281 | 281 | def _forcebundle1(op): |
|
282 | 282 | """return true if a pull/push must use bundle1 |
|
283 | 283 | |
|
284 | 284 | This function is used to allow testing of the older bundle version""" |
|
285 | 285 | ui = op.repo.ui |
|
286 | 286 | forcebundle1 = False |
|
287 | 287 | # The goal is this config is to allow developer to choose the bundle |
|
288 | 288 | # version used during exchanged. This is especially handy during test. |
|
289 | 289 | # Value is a list of bundle version to be picked from, highest version |
|
290 | 290 | # should be used. |
|
291 | 291 | # |
|
292 | 292 | # developer config: devel.legacy.exchange |
|
293 | 293 | exchange = ui.configlist('devel', 'legacy.exchange') |
|
294 | 294 | forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange |
|
295 | 295 | return forcebundle1 or not op.remote.capable('bundle2') |
|
296 | 296 | |
|
297 | 297 | class pushoperation(object): |
|
298 | 298 | """A object that represent a single push operation |
|
299 | 299 | |
|
300 | 300 | Its purpose is to carry push related state and very common operations. |
|
301 | 301 | |
|
302 | 302 | A new pushoperation should be created at the beginning of each push and |
|
303 | 303 | discarded afterward. |
|
304 | 304 | """ |
|
305 | 305 | |
|
306 | 306 | def __init__(self, repo, remote, force=False, revs=None, newbranch=False, |
|
307 | 307 | bookmarks=(), pushvars=None): |
|
308 | 308 | # repo we push from |
|
309 | 309 | self.repo = repo |
|
310 | 310 | self.ui = repo.ui |
|
311 | 311 | # repo we push to |
|
312 | 312 | self.remote = remote |
|
313 | 313 | # force option provided |
|
314 | 314 | self.force = force |
|
315 | 315 | # revs to be pushed (None is "all") |
|
316 | 316 | self.revs = revs |
|
317 | 317 | # bookmark explicitly pushed |
|
318 | 318 | self.bookmarks = bookmarks |
|
319 | 319 | # allow push of new branch |
|
320 | 320 | self.newbranch = newbranch |
|
321 | 321 | # step already performed |
|
322 | 322 | # (used to check what steps have been already performed through bundle2) |
|
323 | 323 | self.stepsdone = set() |
|
324 | 324 | # Integer version of the changegroup push result |
|
325 | 325 | # - None means nothing to push |
|
326 | 326 | # - 0 means HTTP error |
|
327 | 327 | # - 1 means we pushed and remote head count is unchanged *or* |
|
328 | 328 | # we have outgoing changesets but refused to push |
|
329 | 329 | # - other values as described by addchangegroup() |
|
330 | 330 | self.cgresult = None |
|
331 | 331 | # Boolean value for the bookmark push |
|
332 | 332 | self.bkresult = None |
|
333 | 333 | # discover.outgoing object (contains common and outgoing data) |
|
334 | 334 | self.outgoing = None |
|
335 | 335 | # all remote topological heads before the push |
|
336 | 336 | self.remoteheads = None |
|
337 | 337 | # Details of the remote branch pre and post push |
|
338 | 338 | # |
|
339 | 339 | # mapping: {'branch': ([remoteheads], |
|
340 | 340 | # [newheads], |
|
341 | 341 | # [unsyncedheads], |
|
342 | 342 | # [discardedheads])} |
|
343 | 343 | # - branch: the branch name |
|
344 | 344 | # - remoteheads: the list of remote heads known locally |
|
345 | 345 | # None if the branch is new |
|
346 | 346 | # - newheads: the new remote heads (known locally) with outgoing pushed |
|
347 | 347 | # - unsyncedheads: the list of remote heads unknown locally. |
|
348 | 348 | # - discardedheads: the list of remote heads made obsolete by the push |
|
349 | 349 | self.pushbranchmap = None |
|
350 | 350 | # testable as a boolean indicating if any nodes are missing locally. |
|
351 | 351 | self.incoming = None |
|
352 | 352 | # summary of the remote phase situation |
|
353 | 353 | self.remotephases = None |
|
354 | 354 | # phases changes that must be pushed along side the changesets |
|
355 | 355 | self.outdatedphases = None |
|
356 | 356 | # phases changes that must be pushed if changeset push fails |
|
357 | 357 | self.fallbackoutdatedphases = None |
|
358 | 358 | # outgoing obsmarkers |
|
359 | 359 | self.outobsmarkers = set() |
|
360 | 360 | # outgoing bookmarks |
|
361 | 361 | self.outbookmarks = [] |
|
362 | 362 | # transaction manager |
|
363 | 363 | self.trmanager = None |
|
364 | 364 | # map { pushkey partid -> callback handling failure} |
|
365 | 365 | # used to handle exception from mandatory pushkey part failure |
|
366 | 366 | self.pkfailcb = {} |
|
367 | 367 | # an iterable of pushvars or None |
|
368 | 368 | self.pushvars = pushvars |
|
369 | 369 | |
|
370 | 370 | @util.propertycache |
|
371 | 371 | def futureheads(self): |
|
372 | 372 | """future remote heads if the changeset push succeeds""" |
|
373 | 373 | return self.outgoing.missingheads |
|
374 | 374 | |
|
375 | 375 | @util.propertycache |
|
376 | 376 | def fallbackheads(self): |
|
377 | 377 | """future remote heads if the changeset push fails""" |
|
378 | 378 | if self.revs is None: |
|
379 | 379 | # not target to push, all common are relevant |
|
380 | 380 | return self.outgoing.commonheads |
|
381 | 381 | unfi = self.repo.unfiltered() |
|
382 | 382 | # I want cheads = heads(::missingheads and ::commonheads) |
|
383 | 383 | # (missingheads is revs with secret changeset filtered out) |
|
384 | 384 | # |
|
385 | 385 | # This can be expressed as: |
|
386 | 386 | # cheads = ( (missingheads and ::commonheads) |
|
387 | 387 | # + (commonheads and ::missingheads))" |
|
388 | 388 | # ) |
|
389 | 389 | # |
|
390 | 390 | # while trying to push we already computed the following: |
|
391 | 391 | # common = (::commonheads) |
|
392 | 392 | # missing = ((commonheads::missingheads) - commonheads) |
|
393 | 393 | # |
|
394 | 394 | # We can pick: |
|
395 | 395 | # * missingheads part of common (::commonheads) |
|
396 | 396 | common = self.outgoing.common |
|
397 | 397 | nm = self.repo.changelog.nodemap |
|
398 | 398 | cheads = [node for node in self.revs if nm[node] in common] |
|
399 | 399 | # and |
|
400 | 400 | # * commonheads parents on missing |
|
401 | 401 | revset = unfi.set('%ln and parents(roots(%ln))', |
|
402 | 402 | self.outgoing.commonheads, |
|
403 | 403 | self.outgoing.missing) |
|
404 | 404 | cheads.extend(c.node() for c in revset) |
|
405 | 405 | return cheads |
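# Worked illustration (editorial note, not part of the original file):
# suppose the remote has A--B and we push C, a child of B, with
# revs=[C]. outgoing.common contains A and B and outgoing.missing == [C];
# no pushed rev is already common, so cheads starts empty. The roots of
# the missing set are {C}, whose parent B is in commonheads, so the
# revset yields B and fallbackheads == [B]: if the push fails, the
# remote heads are exactly what they were before.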
|
406 | 406 | |
|
407 | 407 | @property |
|
408 | 408 | def commonheads(self): |
|
409 | 409 | """set of all common heads after changeset bundle push""" |
|
410 | 410 | if self.cgresult: |
|
411 | 411 | return self.futureheads |
|
412 | 412 | else: |
|
413 | 413 | return self.fallbackheads |
|
414 | 414 | |
|
415 | 415 | # mapping of messages used when pushing bookmarks
|
416 | 416 | bookmsgmap = {'update': (_("updating bookmark %s\n"), |
|
417 | 417 | _('updating bookmark %s failed!\n')), |
|
418 | 418 | 'export': (_("exporting bookmark %s\n"), |
|
419 | 419 | _('exporting bookmark %s failed!\n')), |
|
420 | 420 | 'delete': (_("deleting remote bookmark %s\n"), |
|
421 | 421 | _('deleting remote bookmark %s failed!\n')), |
|
422 | 422 | } |
|
423 | 423 | |
|
424 | 424 | |
|
425 | 425 | def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(), |
|
426 | 426 | opargs=None): |
|
427 | 427 | '''Push outgoing changesets (limited by revs) from a local |
|
428 | 428 | repository to remote. Return an integer: |
|
429 | 429 | - None means nothing to push |
|
430 | 430 | - 0 means HTTP error |
|
431 | 431 | - 1 means we pushed and remote head count is unchanged *or* |
|
432 | 432 | we have outgoing changesets but refused to push |
|
433 | 433 | - other values as described by addchangegroup() |
|
434 | 434 | ''' |
|
435 | 435 | if opargs is None: |
|
436 | 436 | opargs = {} |
|
437 | 437 | pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks, |
|
438 | 438 | **pycompat.strkwargs(opargs)) |
|
439 | 439 | if pushop.remote.local(): |
|
440 | 440 | missing = (set(pushop.repo.requirements) |
|
441 | 441 | - pushop.remote.local().supported) |
|
442 | 442 | if missing: |
|
443 | 443 | msg = _("required features are not" |
|
444 | 444 | " supported in the destination:" |
|
445 | 445 | " %s") % (', '.join(sorted(missing))) |
|
446 | 446 | raise error.Abort(msg) |
|
447 | 447 | |
|
448 | 448 | if not pushop.remote.canpush(): |
|
449 | 449 | raise error.Abort(_("destination does not support push")) |
|
450 | 450 | |
|
451 | 451 | if not pushop.remote.capable('unbundle'): |
|
452 | 452 | raise error.Abort(_('cannot push: destination does not support the ' |
|
453 | 453 | 'unbundle wire protocol command')) |
|
454 | 454 | |
|
455 | 455 | # get lock as we might write phase data |
|
456 | 456 | wlock = lock = None |
|
457 | 457 | try: |
|
458 | 458 | # bundle2 push may receive a reply bundle touching bookmarks or other |
|
459 | 459 | # things requiring the wlock. Take it now to ensure proper ordering. |
|
460 | 460 | maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback') |
|
461 | 461 | if (not _forcebundle1(pushop)) and maypushback: |
|
462 | 462 | wlock = pushop.repo.wlock() |
|
463 | 463 | lock = pushop.repo.lock() |
|
464 | 464 | pushop.trmanager = transactionmanager(pushop.repo, |
|
465 | 465 | 'push-response', |
|
466 | 466 | pushop.remote.url()) |
|
467 | 467 | except IOError as err: |
|
468 | 468 | if err.errno != errno.EACCES: |
|
469 | 469 | raise |
|
470 | 470 | # source repo cannot be locked. |
|
471 | 471 | # We do not abort the push, but just disable the local phase |
|
472 | 472 | # synchronisation. |
|
473 | 473 | msg = 'cannot lock source repository: %s\n' % err |
|
474 | 474 | pushop.ui.debug(msg) |
|
475 | 475 | |
|
476 | 476 | with wlock or util.nullcontextmanager(), \ |
|
477 | 477 | lock or util.nullcontextmanager(), \ |
|
478 | 478 | pushop.trmanager or util.nullcontextmanager(): |
|
479 | 479 | pushop.repo.checkpush(pushop) |
|
480 | 480 | _pushdiscovery(pushop) |
|
481 | 481 | if not _forcebundle1(pushop): |
|
482 | 482 | _pushbundle2(pushop) |
|
483 | 483 | _pushchangeset(pushop) |
|
484 | 484 | _pushsyncphase(pushop) |
|
485 | 485 | _pushobsolete(pushop) |
|
486 | 486 | _pushbookmark(pushop) |
|
487 | 487 | |
|
488 | 488 | return pushop |
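# Editorial sketch (not part of the original file): how a caller would
# typically drive push(). 'other' is assumed to be a peer obtained from
# hg.peer(); the pushed revision and URL are hypothetical.
#
#   from mercurial import hg
#   other = hg.peer(repo, {}, 'ssh://example.com//srv/repo')
#   pushop = push(repo, other, revs=[repo['.'].node()])
#   if pushop.cgresult == 0:
#       repo.ui.warn('push failed (HTTP error)\n')
#   elif pushop.cgresult is None:
#       repo.ui.status('nothing to push\n')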
|
489 | 489 | |
|
490 | 490 | # list of steps to perform discovery before push |
|
491 | 491 | pushdiscoveryorder = [] |
|
492 | 492 | |
|
493 | 493 | # Mapping between step name and function |
|
494 | 494 | # |
|
495 | 495 | # This exists to help extensions wrap steps if necessary |
|
496 | 496 | pushdiscoverymapping = {} |
|
497 | 497 | |
|
498 | 498 | def pushdiscovery(stepname): |
|
499 | 499 | """decorator for function performing discovery before push |
|
500 | 500 | |
|
501 | 501 | The function is added to the step -> function mapping and appended to the |
|
502 | 502 | list of steps. Beware that decorated functions will be added in order (this
|
503 | 503 | may matter). |
|
504 | 504 | |
|
505 | 505 | You can only use this decorator for a new step; if you want to wrap a step

506 | 506 | from an extension, change the pushdiscoverymapping dictionary directly."""
|
507 | 507 | def dec(func): |
|
508 | 508 | assert stepname not in pushdiscoverymapping |
|
509 | 509 | pushdiscoverymapping[stepname] = func |
|
510 | 510 | pushdiscoveryorder.append(stepname) |
|
511 | 511 | return func |
|
512 | 512 | return dec |
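# Editorial sketch (not part of the original file): registering a new
# discovery step with the decorator above, and wrapping an existing step
# through pushdiscoverymapping as the docstring recommends. All 'my*'
# names are hypothetical.
#
#   @pushdiscovery('my-step')
#   def _pushdiscoverymystep(pushop):
#       pushop.ui.debug('my-step ran\n')
#
#   origstep = pushdiscoverymapping['changeset']
#   def mywrapper(pushop):
#       pushop.ui.debug('about to run changeset discovery\n')
#       return origstep(pushop)
#   pushdiscoverymapping['changeset'] = mywrapper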
|
513 | 513 | |
|
514 | 514 | def _pushdiscovery(pushop): |
|
515 | 515 | """Run all discovery steps""" |
|
516 | 516 | for stepname in pushdiscoveryorder: |
|
517 | 517 | step = pushdiscoverymapping[stepname] |
|
518 | 518 | step(pushop) |
|
519 | 519 | |
|
520 | 520 | @pushdiscovery('changeset') |
|
521 | 521 | def _pushdiscoverychangeset(pushop): |
|
522 | 522 | """discover the changeset that need to be pushed""" |
|
523 | 523 | fci = discovery.findcommonincoming |
|
524 | 524 | if pushop.revs: |
|
525 | 525 | commoninc = fci(pushop.repo, pushop.remote, force=pushop.force, |
|
526 | 526 | ancestorsof=pushop.revs) |
|
527 | 527 | else: |
|
528 | 528 | commoninc = fci(pushop.repo, pushop.remote, force=pushop.force) |
|
529 | 529 | common, inc, remoteheads = commoninc |
|
530 | 530 | fco = discovery.findcommonoutgoing |
|
531 | 531 | outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs, |
|
532 | 532 | commoninc=commoninc, force=pushop.force) |
|
533 | 533 | pushop.outgoing = outgoing |
|
534 | 534 | pushop.remoteheads = remoteheads |
|
535 | 535 | pushop.incoming = inc |
|
536 | 536 | |
|
537 | 537 | @pushdiscovery('phase') |
|
538 | 538 | def _pushdiscoveryphase(pushop): |
|
539 | 539 | """discover the phase that needs to be pushed |
|
540 | 540 | |
|
541 | 541 | (computed for both the success and failure cases of the changeset push)"""
|
542 | 542 | outgoing = pushop.outgoing |
|
543 | 543 | unfi = pushop.repo.unfiltered() |
|
544 | 544 | remotephases = pushop.remote.listkeys('phases') |
|
545 | 545 | if (pushop.ui.configbool('ui', '_usedassubrepo') |
|
546 | 546 | and remotephases # server supports phases |
|
547 | 547 | and not pushop.outgoing.missing # no changesets to be pushed |
|
548 | 548 | and remotephases.get('publishing', False)): |
|
549 | 549 | # When: |
|
550 | 550 | # - this is a subrepo push |
|
551 | 551 | # - and the remote supports phases

552 | 552 | # - and no changesets are to be pushed

553 | 553 | # - and the remote is publishing

554 | 554 | # We may be in the issue 3781 case!
|
555 | 555 | # We drop the phase synchronisation that would otherwise be

556 | 556 | # done as a courtesy, to avoid publishing changesets that may

557 | 557 | # still be draft on the remote.
|
558 | 558 | pushop.outdatedphases = [] |
|
559 | 559 | pushop.fallbackoutdatedphases = [] |
|
560 | 560 | return |
|
561 | 561 | |
|
562 | 562 | pushop.remotephases = phases.remotephasessummary(pushop.repo, |
|
563 | 563 | pushop.fallbackheads, |
|
564 | 564 | remotephases) |
|
565 | 565 | droots = pushop.remotephases.draftroots |
|
566 | 566 | |
|
567 | 567 | extracond = '' |
|
568 | 568 | if not pushop.remotephases.publishing: |
|
569 | 569 | extracond = ' and public()' |
|
570 | 570 | revset = 'heads((%%ln::%%ln) %s)' % extracond |
|
571 | 571 | # Get the list of all revs that are draft on the remote but public here.

572 | 572 | # XXX Beware that the revset breaks if droots is not strictly

573 | 573 | # XXX roots; we may want to ensure it is, but that is costly
|
574 | 574 | fallback = list(unfi.set(revset, droots, pushop.fallbackheads)) |
|
575 | 575 | if not outgoing.missing: |
|
576 | 576 | future = fallback |
|
577 | 577 | else: |
|
578 | 578 | # add the changesets we are going to push as draft

579 | 579 | #

580 | 580 | # should not be necessary for a publishing server, but because of an

581 | 581 | # issue fixed in xxxxx we have to do it anyway.
|
582 | 582 | fdroots = list(unfi.set('roots(%ln + %ln::)', |
|
583 | 583 | outgoing.missing, droots)) |
|
584 | 584 | fdroots = [f.node() for f in fdroots] |
|
585 | 585 | future = list(unfi.set(revset, fdroots, pushop.futureheads)) |
|
586 | 586 | pushop.outdatedphases = future |
|
587 | 587 | pushop.fallbackoutdatedphases = fallback |
|
588 | 588 | |
|
589 | 589 | @pushdiscovery('obsmarker') |
|
590 | 590 | def _pushdiscoveryobsmarkers(pushop): |
|
591 | 591 | if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt) |
|
592 | 592 | and pushop.repo.obsstore |
|
593 | 593 | and 'obsolete' in pushop.remote.listkeys('namespaces')): |
|
594 | 594 | repo = pushop.repo |
|
595 | 595 | # very naive computation that can be quite expensive on big repos.

596 | 596 | # However, evolution is currently slow on them anyway.
|
597 | 597 | nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads)) |
|
598 | 598 | pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes) |
|
599 | 599 | |
|
600 | 600 | @pushdiscovery('bookmarks') |
|
601 | 601 | def _pushdiscoverybookmarks(pushop): |
|
602 | 602 | ui = pushop.ui |
|
603 | 603 | repo = pushop.repo.unfiltered() |
|
604 | 604 | remote = pushop.remote |
|
605 | 605 | ui.debug("checking for updated bookmarks\n") |
|
606 | 606 | ancestors = () |
|
607 | 607 | if pushop.revs: |
|
608 | 608 | revnums = map(repo.changelog.rev, pushop.revs) |
|
609 | 609 | ancestors = repo.changelog.ancestors(revnums, inclusive=True) |
|
610 | 610 | remotebookmark = remote.listkeys('bookmarks') |
|
611 | 611 | |
|
612 | 612 | explicit = set([repo._bookmarks.expandname(bookmark) |
|
613 | 613 | for bookmark in pushop.bookmarks]) |
|
614 | 614 | |
|
615 | 615 | remotebookmark = bookmod.unhexlifybookmarks(remotebookmark) |
|
616 | 616 | comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark) |
|
617 | 617 | |
|
618 | 618 | def safehex(x): |
|
619 | 619 | if x is None: |
|
620 | 620 | return x |
|
621 | 621 | return hex(x) |
|
622 | 622 | |
|
623 | 623 | def hexifycompbookmarks(bookmarks): |
|
624 | 624 | for b, scid, dcid in bookmarks: |
|
625 | 625 | yield b, safehex(scid), safehex(dcid) |
|
626 | 626 | |
|
627 | 627 | comp = [hexifycompbookmarks(marks) for marks in comp] |
|
628 | 628 | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp |
|
629 | 629 | |
|
630 | 630 | for b, scid, dcid in advsrc: |
|
631 | 631 | if b in explicit: |
|
632 | 632 | explicit.remove(b) |
|
633 | 633 | if not ancestors or repo[scid].rev() in ancestors: |
|
634 | 634 | pushop.outbookmarks.append((b, dcid, scid)) |
|
635 | 635 | # search added bookmark |
|
636 | 636 | for b, scid, dcid in addsrc: |
|
637 | 637 | if b in explicit: |
|
638 | 638 | explicit.remove(b) |
|
639 | 639 | pushop.outbookmarks.append((b, '', scid)) |
|
640 | 640 | # search for overwritten bookmark |
|
641 | 641 | for b, scid, dcid in list(advdst) + list(diverge) + list(differ): |
|
642 | 642 | if b in explicit: |
|
643 | 643 | explicit.remove(b) |
|
644 | 644 | pushop.outbookmarks.append((b, dcid, scid)) |
|
645 | 645 | # search for bookmark to delete |
|
646 | 646 | for b, scid, dcid in adddst: |
|
647 | 647 | if b in explicit: |
|
648 | 648 | explicit.remove(b) |
|
649 | 649 | # treat as "deleted locally" |
|
650 | 650 | pushop.outbookmarks.append((b, dcid, '')) |
|
651 | 651 | # identical bookmarks shouldn't get reported |
|
652 | 652 | for b, scid, dcid in same: |
|
653 | 653 | if b in explicit: |
|
654 | 654 | explicit.remove(b) |
|
655 | 655 | |
|
656 | 656 | if explicit: |
|
657 | 657 | explicit = sorted(explicit) |
|
658 | 658 | # we should probably list all of them |
|
659 | 659 | ui.warn(_('bookmark %s does not exist on the local ' |
|
660 | 660 | 'or remote repository!\n') % explicit[0]) |
|
661 | 661 | pushop.bkresult = 2 |
|
662 | 662 | |
|
663 | 663 | pushop.outbookmarks.sort() |
|
664 | 664 | |
|
665 | 665 | def _pushcheckoutgoing(pushop): |
|
666 | 666 | outgoing = pushop.outgoing |
|
667 | 667 | unfi = pushop.repo.unfiltered() |
|
668 | 668 | if not outgoing.missing: |
|
669 | 669 | # nothing to push |
|
670 | 670 | scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) |
|
671 | 671 | return False |
|
672 | 672 | # something to push |
|
673 | 673 | if not pushop.force: |
|
674 | 674 | # if repo.obsstore == False --> no obsolete |
|
675 | 675 | # then, save the iteration |
|
676 | 676 | if unfi.obsstore: |
|
677 | 677 | # these messages are defined here for 80-char-limit reasons
|
678 | 678 | mso = _("push includes obsolete changeset: %s!") |
|
679 | 679 | mspd = _("push includes phase-divergent changeset: %s!") |
|
680 | 680 | mscd = _("push includes content-divergent changeset: %s!") |
|
681 | 681 | mst = {"orphan": _("push includes orphan changeset: %s!"), |
|
682 | 682 | "phase-divergent": mspd, |
|
683 | 683 | "content-divergent": mscd} |
|
684 | 684 | # If there is at least one obsolete or unstable

685 | 685 | # changeset in missing, at least one of the missing

686 | 686 | # heads will be obsolete or unstable. So checking

687 | 687 | # heads only is ok
|
688 | 688 | for node in outgoing.missingheads: |
|
689 | 689 | ctx = unfi[node] |
|
690 | 690 | if ctx.obsolete(): |
|
691 | 691 | raise error.Abort(mso % ctx) |
|
692 | 692 | elif ctx.isunstable(): |
|
693 | 693 | # TODO print more than one instability in the abort |
|
694 | 694 | # message |
|
695 | 695 | raise error.Abort(mst[ctx.instabilities()[0]] % ctx) |
|
696 | 696 | |
|
697 | 697 | discovery.checkheads(pushop) |
|
698 | 698 | return True |
|
699 | 699 | |
|
700 | 700 | # List of names of steps to perform for an outgoing bundle2, order matters. |
|
701 | 701 | b2partsgenorder = [] |
|
702 | 702 | |
|
703 | 703 | # Mapping between step name and function |
|
704 | 704 | # |
|
705 | 705 | # This exists to help extensions wrap steps if necessary |
|
706 | 706 | b2partsgenmapping = {} |
|
707 | 707 | |
|
708 | 708 | def b2partsgenerator(stepname, idx=None): |
|
709 | 709 | """decorator for function generating bundle2 part |
|
710 | 710 | |
|
711 | 711 | The function is added to the step -> function mapping and appended to the |
|
712 | 712 | list of steps. Beware that decorated functions will be added in order |
|
713 | 713 | (this may matter). |
|
714 | 714 | |
|
715 | 715 | You can only use this decorator for new steps; if you want to wrap a step

716 | 716 | from an extension, modify the b2partsgenmapping dictionary directly."""
|
717 | 717 | def dec(func): |
|
718 | 718 | assert stepname not in b2partsgenmapping |
|
719 | 719 | b2partsgenmapping[stepname] = func |
|
720 | 720 | if idx is None: |
|
721 | 721 | b2partsgenorder.append(stepname) |
|
722 | 722 | else: |
|
723 | 723 | b2partsgenorder.insert(idx, stepname) |
|
724 | 724 | return func |
|
725 | 725 | return dec |
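# Editorial sketch (not part of the original file): an extension adding
# its own part to every push bundle. The part name 'myext:data' and its
# payload are hypothetical; returning a function here would register it
# as a reply handler, mirroring _pushb2ctx below.
#
#   @b2partsgenerator('my-part')
#   def _pushb2mypart(pushop, bundler):
#       if 'my-part' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('my-part')
#       bundler.newpart('myext:data', data='payload', mandatory=False)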
|
726 | 726 | |
|
727 | 727 | def _pushb2ctxcheckheads(pushop, bundler): |
|
728 | 728 | """Generate race condition checking parts |
|
729 | 729 | |
|
730 | 730 | Exists as an independent function to aid extensions |
|
731 | 731 | """ |
|
732 | 732 | # * 'force' does not check for push races,

733 | 733 | # * if we don't push anything, there is nothing to check.
|
734 | 734 | if not pushop.force and pushop.outgoing.missingheads: |
|
735 | 735 | allowunrelated = 'related' in bundler.capabilities.get('checkheads', ()) |
|
736 | 736 | emptyremote = pushop.pushbranchmap is None |
|
737 | 737 | if not allowunrelated or emptyremote: |
|
738 | 738 | bundler.newpart('check:heads', data=iter(pushop.remoteheads)) |
|
739 | 739 | else: |
|
740 | 740 | affected = set() |
|
741 | 741 | for branch, heads in pushop.pushbranchmap.iteritems(): |
|
742 | 742 | remoteheads, newheads, unsyncedheads, discardedheads = heads |
|
743 | 743 | if remoteheads is not None: |
|
744 | 744 | remote = set(remoteheads) |
|
745 | 745 | affected |= set(discardedheads) & remote |
|
746 | 746 | affected |= remote - set(newheads) |
|
747 | 747 | if affected: |
|
748 | 748 | data = iter(sorted(affected)) |
|
749 | 749 | bundler.newpart('check:updated-heads', data=data) |
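# Worked illustration (editorial note, not part of the original file):
# say branch 'default' had remote heads {H1, H2}, and the push obsoletes
# H1 (discardedheads == [H1]) while adding H3 (newheads == [H2, H3]).
# Then:
#   affected = ({H1} & {H1, H2}) | ({H1, H2} - {H2, H3}) = {H1}
# and the 'check:updated-heads' part asks the server to abort if H1 is
# no longer one of its heads, i.e. if someone else moved it meanwhile.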
|
750 | 750 | |
|
751 | 751 | def _pushing(pushop): |
|
752 | 752 | """return True if we are pushing anything""" |
|
753 | 753 | return bool(pushop.outgoing.missing |
|
754 | 754 | or pushop.outdatedphases |
|
755 | 755 | or pushop.outobsmarkers |
|
756 | 756 | or pushop.outbookmarks) |
|
757 | 757 | |
|
758 | 758 | @b2partsgenerator('check-bookmarks') |
|
759 | 759 | def _pushb2checkbookmarks(pushop, bundler): |
|
760 | 760 | """insert bookmark move checking""" |
|
761 | 761 | if not _pushing(pushop) or pushop.force: |
|
762 | 762 | return |
|
763 | 763 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
764 | 764 | hasbookmarkcheck = 'bookmarks' in b2caps |
|
765 | 765 | if not (pushop.outbookmarks and hasbookmarkcheck): |
|
766 | 766 | return |
|
767 | 767 | data = [] |
|
768 | 768 | for book, old, new in pushop.outbookmarks: |
|
769 | 769 | old = bin(old) |
|
770 | 770 | data.append((book, old)) |
|
771 | 771 | checkdata = bookmod.binaryencode(data) |
|
772 | 772 | bundler.newpart('check:bookmarks', data=checkdata) |
|
773 | 773 | |
|
774 | 774 | @b2partsgenerator('check-phases') |
|
775 | 775 | def _pushb2checkphases(pushop, bundler): |
|
776 | 776 | """insert phase move checking""" |
|
777 | 777 | if not _pushing(pushop) or pushop.force: |
|
778 | 778 | return |
|
779 | 779 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
780 | 780 | hasphaseheads = 'heads' in b2caps.get('phases', ()) |
|
781 | 781 | if pushop.remotephases is not None and hasphaseheads: |
|
782 | 782 | # check that the remote phase has not changed |
|
783 | 783 | checks = [[] for p in phases.allphases] |
|
784 | 784 | checks[phases.public].extend(pushop.remotephases.publicheads) |
|
785 | 785 | checks[phases.draft].extend(pushop.remotephases.draftroots) |
|
786 | 786 | if any(checks): |
|
787 | 787 | for nodes in checks: |
|
788 | 788 | nodes.sort() |
|
789 | 789 | checkdata = phases.binaryencode(checks) |
|
790 | 790 | bundler.newpart('check:phases', data=checkdata) |
|
791 | 791 | |
|
792 | 792 | @b2partsgenerator('changeset') |
|
793 | 793 | def _pushb2ctx(pushop, bundler): |
|
794 | 794 | """handle changegroup push through bundle2 |
|
795 | 795 | |
|
796 | 796 | addchangegroup result is stored in the ``pushop.cgresult`` attribute. |
|
797 | 797 | """ |
|
798 | 798 | if 'changesets' in pushop.stepsdone: |
|
799 | 799 | return |
|
800 | 800 | pushop.stepsdone.add('changesets') |
|
801 | 801 | # Send known heads to the server for race detection. |
|
802 | 802 | if not _pushcheckoutgoing(pushop): |
|
803 | 803 | return |
|
804 | 804 | pushop.repo.prepushoutgoinghooks(pushop) |
|
805 | 805 | |
|
806 | 806 | _pushb2ctxcheckheads(pushop, bundler) |
|
807 | 807 | |
|
808 | 808 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
809 | 809 | version = '01' |
|
810 | 810 | cgversions = b2caps.get('changegroup') |
|
811 | 811 | if cgversions: # 3.1 and 3.2 ship with an empty value |
|
812 | 812 | cgversions = [v for v in cgversions |
|
813 | 813 | if v in changegroup.supportedoutgoingversions( |
|
814 | 814 | pushop.repo)] |
|
815 | 815 | if not cgversions: |
|
816 | 816 | raise ValueError(_('no common changegroup version')) |
|
817 | 817 | version = max(cgversions) |
|
818 | 818 | cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version, |
|
819 | 819 | 'push') |
|
820 | 820 | cgpart = bundler.newpart('changegroup', data=cgstream) |
|
821 | 821 | if cgversions: |
|
822 | 822 | cgpart.addparam('version', version) |
|
823 | 823 | if 'treemanifest' in pushop.repo.requirements: |
|
824 | 824 | cgpart.addparam('treemanifest', '1') |
|
825 | 825 | def handlereply(op): |
|
826 | 826 | """extract addchangegroup returns from server reply""" |
|
827 | 827 | cgreplies = op.records.getreplies(cgpart.id) |
|
828 | 828 | assert len(cgreplies['changegroup']) == 1 |
|
829 | 829 | pushop.cgresult = cgreplies['changegroup'][0]['return'] |
|
830 | 830 | return handlereply |
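# Worked illustration (editorial note, not part of the original file):
# if the server advertises changegroup versions ['01', '02'] and the
# local repo supports ['01', '02', '03'], the intersection is
# ['01', '02'] and max() picks '02'. Against a 3.1/3.2 server that
# ships an empty value, the code keeps the '01' default and omits the
# 'version' parameter entirely.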
|
831 | 831 | |
|
832 | 832 | @b2partsgenerator('phase') |
|
833 | 833 | def _pushb2phases(pushop, bundler): |
|
834 | 834 | """handle phase push through bundle2""" |
|
835 | 835 | if 'phases' in pushop.stepsdone: |
|
836 | 836 | return |
|
837 | 837 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
838 | 838 | ui = pushop.repo.ui |
|
839 | 839 | |
|
840 | 840 | legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange') |
|
841 | 841 | haspushkey = 'pushkey' in b2caps |
|
842 | 842 | hasphaseheads = 'heads' in b2caps.get('phases', ()) |
|
843 | 843 | |
|
844 | 844 | if hasphaseheads and not legacyphase: |
|
845 | 845 | return _pushb2phaseheads(pushop, bundler) |
|
846 | 846 | elif haspushkey: |
|
847 | 847 | return _pushb2phasespushkey(pushop, bundler) |
|
848 | 848 | |
|
849 | 849 | def _pushb2phaseheads(pushop, bundler): |
|
850 | 850 | """push phase information through a bundle2 - binary part""" |
|
851 | 851 | pushop.stepsdone.add('phases') |
|
852 | 852 | if pushop.outdatedphases: |
|
853 | 853 | updates = [[] for p in phases.allphases] |
|
854 | 854 | updates[0].extend(h.node() for h in pushop.outdatedphases) |
|
855 | 855 | phasedata = phases.binaryencode(updates) |
|
856 | 856 | bundler.newpart('phase-heads', data=phasedata) |
|
857 | 857 | |
|
858 | 858 | def _pushb2phasespushkey(pushop, bundler): |
|
859 | 859 | """push phase information through a bundle2 - pushkey part""" |
|
860 | 860 | pushop.stepsdone.add('phases') |
|
861 | 861 | part2node = [] |
|
862 | 862 | |
|
863 | 863 | def handlefailure(pushop, exc): |
|
864 | 864 | targetid = int(exc.partid) |
|
865 | 865 | for partid, node in part2node: |
|
866 | 866 | if partid == targetid: |
|
867 | 867 | raise error.Abort(_('updating %s to public failed') % node) |
|
868 | 868 | |
|
869 | 869 | enc = pushkey.encode |
|
870 | 870 | for newremotehead in pushop.outdatedphases: |
|
871 | 871 | part = bundler.newpart('pushkey') |
|
872 | 872 | part.addparam('namespace', enc('phases')) |
|
873 | 873 | part.addparam('key', enc(newremotehead.hex())) |
|
874 | 874 | part.addparam('old', enc('%d' % phases.draft)) |
|
875 | 875 | part.addparam('new', enc('%d' % phases.public)) |
|
876 | 876 | part2node.append((part.id, newremotehead)) |
|
877 | 877 | pushop.pkfailcb[part.id] = handlefailure |
|
878 | 878 | |
|
879 | 879 | def handlereply(op): |
|
880 | 880 | for partid, node in part2node: |
|
881 | 881 | partrep = op.records.getreplies(partid) |
|
882 | 882 | results = partrep['pushkey'] |
|
883 | 883 | assert len(results) <= 1 |
|
884 | 884 | msg = None |
|
885 | 885 | if not results: |
|
886 | 886 | msg = _('server ignored update of %s to public!\n') % node |
|
887 | 887 | elif not int(results[0]['return']): |
|
888 | 888 | msg = _('updating %s to public failed!\n') % node |
|
889 | 889 | if msg is not None: |
|
890 | 890 | pushop.ui.warn(msg) |
|
891 | 891 | return handlereply |
|
892 | 892 | |
|
893 | 893 | @b2partsgenerator('obsmarkers') |
|
894 | 894 | def _pushb2obsmarkers(pushop, bundler): |
|
895 | 895 | if 'obsmarkers' in pushop.stepsdone: |
|
896 | 896 | return |
|
897 | 897 | remoteversions = bundle2.obsmarkersversion(bundler.capabilities) |
|
898 | 898 | if obsolete.commonversion(remoteversions) is None: |
|
899 | 899 | return |
|
900 | 900 | pushop.stepsdone.add('obsmarkers') |
|
901 | 901 | if pushop.outobsmarkers: |
|
902 | 902 | markers = sorted(pushop.outobsmarkers) |
|
903 | 903 | bundle2.buildobsmarkerspart(bundler, markers) |
|
904 | 904 | |
|
905 | 905 | @b2partsgenerator('bookmarks') |
|
906 | 906 | def _pushb2bookmarks(pushop, bundler): |
|
907 | 907 | """handle bookmark push through bundle2""" |
|
908 | 908 | if 'bookmarks' in pushop.stepsdone: |
|
909 | 909 | return |
|
910 | 910 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
911 | 911 | |
|
912 | 912 | legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange') |
|
913 | 913 | legacybooks = 'bookmarks' in legacy |
|
914 | 914 | |
|
915 | 915 | if not legacybooks and 'bookmarks' in b2caps: |
|
916 | 916 | return _pushb2bookmarkspart(pushop, bundler) |
|
917 | 917 | elif 'pushkey' in b2caps: |
|
918 | 918 | return _pushb2bookmarkspushkey(pushop, bundler) |
|
919 | 919 | |
|
920 | 920 | def _bmaction(old, new): |
|
921 | 921 | """small utility for bookmark pushing""" |
|
922 | 922 | if not old: |
|
923 | 923 | return 'export' |
|
924 | 924 | elif not new: |
|
925 | 925 | return 'delete' |
|
926 | 926 | return 'update' |
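# For illustration (editorial note, not part of the original file), the
# three cases, with hex node ids abbreviated:
#
#   _bmaction('', 'aaaa')     -> 'export'  (bookmark is new on the remote)
#   _bmaction('aaaa', '')     -> 'delete'  (bookmark was deleted locally)
#   _bmaction('aaaa', 'bbbb') -> 'update'  (bookmark moved)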
|
927 | 927 | |
|
928 | 928 | def _pushb2bookmarkspart(pushop, bundler): |
|
929 | 929 | pushop.stepsdone.add('bookmarks') |
|
930 | 930 | if not pushop.outbookmarks: |
|
931 | 931 | return |
|
932 | 932 | |
|
933 | 933 | allactions = [] |
|
934 | 934 | data = [] |
|
935 | 935 | for book, old, new in pushop.outbookmarks: |
|
936 | 936 | new = bin(new) |
|
937 | 937 | data.append((book, new)) |
|
938 | 938 | allactions.append((book, _bmaction(old, new))) |
|
939 | 939 | checkdata = bookmod.binaryencode(data) |
|
940 | 940 | bundler.newpart('bookmarks', data=checkdata) |
|
941 | 941 | |
|
942 | 942 | def handlereply(op): |
|
943 | 943 | ui = pushop.ui |
|
944 | 944 | # if success |
|
945 | 945 | for book, action in allactions: |
|
946 | 946 | ui.status(bookmsgmap[action][0] % book) |
|
947 | 947 | |
|
948 | 948 | return handlereply |
|
949 | 949 | |
|
950 | 950 | def _pushb2bookmarkspushkey(pushop, bundler): |
|
951 | 951 | pushop.stepsdone.add('bookmarks') |
|
952 | 952 | part2book = [] |
|
953 | 953 | enc = pushkey.encode |
|
954 | 954 | |
|
955 | 955 | def handlefailure(pushop, exc): |
|
956 | 956 | targetid = int(exc.partid) |
|
957 | 957 | for partid, book, action in part2book: |
|
958 | 958 | if partid == targetid: |
|
959 | 959 | raise error.Abort(bookmsgmap[action][1].rstrip() % book) |
|
960 | 960 | # we should not be called for a part we did not generate
|
961 | 961 | assert False |
|
962 | 962 | |
|
963 | 963 | for book, old, new in pushop.outbookmarks: |
|
964 | 964 | part = bundler.newpart('pushkey') |
|
965 | 965 | part.addparam('namespace', enc('bookmarks')) |
|
966 | 966 | part.addparam('key', enc(book)) |
|
967 | 967 | part.addparam('old', enc(old)) |
|
968 | 968 | part.addparam('new', enc(new)) |
|
969 | 969 | action = 'update' |
|
970 | 970 | if not old: |
|
971 | 971 | action = 'export' |
|
972 | 972 | elif not new: |
|
973 | 973 | action = 'delete' |
|
974 | 974 | part2book.append((part.id, book, action)) |
|
975 | 975 | pushop.pkfailcb[part.id] = handlefailure |
|
976 | 976 | |
|
977 | 977 | def handlereply(op): |
|
978 | 978 | ui = pushop.ui |
|
979 | 979 | for partid, book, action in part2book: |
|
980 | 980 | partrep = op.records.getreplies(partid) |
|
981 | 981 | results = partrep['pushkey'] |
|
982 | 982 | assert len(results) <= 1 |
|
983 | 983 | if not results: |
|
984 | 984 | pushop.ui.warn(_('server ignored bookmark %s update\n') % book) |
|
985 | 985 | else: |
|
986 | 986 | ret = int(results[0]['return']) |
|
987 | 987 | if ret: |
|
988 | 988 | ui.status(bookmsgmap[action][0] % book) |
|
989 | 989 | else: |
|
990 | 990 | ui.warn(bookmsgmap[action][1] % book) |
|
991 | 991 | if pushop.bkresult is not None: |
|
992 | 992 | pushop.bkresult = 1 |
|
993 | 993 | return handlereply |
|
994 | 994 | |
|
995 | 995 | @b2partsgenerator('pushvars', idx=0) |
|
996 | 996 | def _getbundlesendvars(pushop, bundler): |
|
997 | 997 | '''send shellvars via bundle2''' |
|
998 | 998 | pushvars = pushop.pushvars |
|
999 | 999 | if pushvars: |
|
1000 | 1000 | shellvars = {} |
|
1001 | 1001 | for raw in pushvars: |
|
1002 | 1002 | if '=' not in raw: |
|
1003 | 1003 | msg = ("unable to parse variable '%s', should follow " |
|
1004 | 1004 | "'KEY=VALUE' or 'KEY=' format") |
|
1005 | 1005 | raise error.Abort(msg % raw) |
|
1006 | 1006 | k, v = raw.split('=', 1) |
|
1007 | 1007 | shellvars[k] = v |
|
1008 | 1008 | |
|
1009 | 1009 | part = bundler.newpart('pushvars') |
|
1010 | 1010 | |
|
1011 | 1011 | for key, value in shellvars.iteritems(): |
|
1012 | 1012 | part.addparam(key, value, mandatory=False) |
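# Editorial note (not part of the original file): these variables come
# from the experimental `hg push --pushvars KEY=VALUE` flag; on the
# receiving side each one is exposed to hooks as an HG_USERVAR_*
# environment variable. A hypothetical server-side hook could consume
# one like this:
#
#   # server-side .hg/hgrc: [hooks] pretxnchangegroup.check = python:hooks.check
#   def check(ui, repo, **kwargs):
#       import os
#       if os.environ.get('HG_USERVAR_BYPASS_REVIEW') == 'true':
#           return 0   # variable present: allow the push
#       return 1       # otherwise reject (illustrative policy only)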
|
1013 | 1013 | |
|
1014 | 1014 | def _pushbundle2(pushop): |
|
1015 | 1015 | """push data to the remote using bundle2 |
|
1016 | 1016 | |
|
1017 | 1017 | The only currently supported type of data is changegroup but this will |
|
1018 | 1018 | evolve in the future.""" |
|
1019 | 1019 | bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote)) |
|
1020 | 1020 | pushback = (pushop.trmanager |
|
1021 | 1021 | and pushop.ui.configbool('experimental', 'bundle2.pushback')) |
|
1022 | 1022 | |
|
1023 | 1023 | # create reply capability |
|
1024 | 1024 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo, |
|
1025 | 1025 | allowpushback=pushback, |
|
1026 | 1026 | role='client')) |
|
1027 | 1027 | bundler.newpart('replycaps', data=capsblob) |
|
1028 | 1028 | replyhandlers = [] |
|
1029 | 1029 | for partgenname in b2partsgenorder: |
|
1030 | 1030 | partgen = b2partsgenmapping[partgenname] |
|
1031 | 1031 | ret = partgen(pushop, bundler) |
|
1032 | 1032 | if callable(ret): |
|
1033 | 1033 | replyhandlers.append(ret) |
|
1034 | 1034 | # do not push if nothing to push |
|
1035 | 1035 | if bundler.nbparts <= 1: |
|
1036 | 1036 | return |
|
1037 | 1037 | stream = util.chunkbuffer(bundler.getchunks()) |
|
1038 | 1038 | try: |
|
1039 | 1039 | try: |
|
1040 | 1040 | reply = pushop.remote.unbundle( |
|
1041 | 1041 | stream, ['force'], pushop.remote.url()) |
|
1042 | 1042 | except error.BundleValueError as exc: |
|
1043 | 1043 | raise error.Abort(_('missing support for %s') % exc) |
|
1044 | 1044 | try: |
|
1045 | 1045 | trgetter = None |
|
1046 | 1046 | if pushback: |
|
1047 | 1047 | trgetter = pushop.trmanager.transaction |
|
1048 | 1048 | op = bundle2.processbundle(pushop.repo, reply, trgetter) |
|
1049 | 1049 | except error.BundleValueError as exc: |
|
1050 | 1050 | raise error.Abort(_('missing support for %s') % exc) |
|
1051 | 1051 | except bundle2.AbortFromPart as exc: |
|
1052 | 1052 | pushop.ui.status(_('remote: %s\n') % exc) |
|
1053 | 1053 | if exc.hint is not None: |
|
1054 | 1054 | pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint)) |
|
1055 | 1055 | raise error.Abort(_('push failed on remote')) |
|
1056 | 1056 | except error.PushkeyFailed as exc: |
|
1057 | 1057 | partid = int(exc.partid) |
|
1058 | 1058 | if partid not in pushop.pkfailcb: |
|
1059 | 1059 | raise |
|
1060 | 1060 | pushop.pkfailcb[partid](pushop, exc) |
|
1061 | 1061 | for rephand in replyhandlers: |
|
1062 | 1062 | rephand(op) |
|
1063 | 1063 | |
|
1064 | 1064 | def _pushchangeset(pushop): |
|
1065 | 1065 | """Make the actual push of changeset bundle to remote repo""" |
|
1066 | 1066 | if 'changesets' in pushop.stepsdone: |
|
1067 | 1067 | return |
|
1068 | 1068 | pushop.stepsdone.add('changesets') |
|
1069 | 1069 | if not _pushcheckoutgoing(pushop): |
|
1070 | 1070 | return |
|
1071 | 1071 | |
|
1072 | 1072 | # Should have verified this in push(). |
|
1073 | 1073 | assert pushop.remote.capable('unbundle') |
|
1074 | 1074 | |
|
1075 | 1075 | pushop.repo.prepushoutgoinghooks(pushop) |
|
1076 | 1076 | outgoing = pushop.outgoing |
|
1077 | 1077 | # TODO: get bundlecaps from remote |
|
1078 | 1078 | bundlecaps = None |
|
1079 | 1079 | # create a changegroup from local |
|
1080 | 1080 | if pushop.revs is None and not (outgoing.excluded |
|
1081 | 1081 | or pushop.repo.changelog.filteredrevs): |
|
1082 | 1082 | # push everything, |
|
1083 | 1083 | # use the fast path, no race possible on push |
|
1084 | 1084 | cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push', |
|
1085 | 1085 | fastpath=True, bundlecaps=bundlecaps) |
|
1086 | 1086 | else: |
|
1087 | 1087 | cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', |
|
1088 | 1088 | 'push', bundlecaps=bundlecaps) |
|
1089 | 1089 | |
|
1090 | 1090 | # apply changegroup to remote |
|
1091 | 1091 | # the local repo finds heads on the server, finds out what

1092 | 1092 | # revs it must push. Once revs are transferred, if the server

1093 | 1093 | # finds it has different heads (someone else won the

1094 | 1094 | # commit/push race), the server aborts.
|
1095 | 1095 | if pushop.force: |
|
1096 | 1096 | remoteheads = ['force'] |
|
1097 | 1097 | else: |
|
1098 | 1098 | remoteheads = pushop.remoteheads |
|
1099 | 1099 | # ssh: return remote's addchangegroup() |
|
1100 | 1100 | # http: return remote's addchangegroup() or 0 for error |
|
1101 | 1101 | pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, |
|
1102 | 1102 | pushop.repo.url()) |
|
1103 | 1103 | |
|
1104 | 1104 | def _pushsyncphase(pushop): |
|
1105 | 1105 | """synchronise phase information locally and remotely""" |
|
1106 | 1106 | cheads = pushop.commonheads |
|
1107 | 1107 | # even when we don't push, exchanging phase data is useful |
|
1108 | 1108 | remotephases = pushop.remote.listkeys('phases') |
|
1109 | 1109 | if (pushop.ui.configbool('ui', '_usedassubrepo') |
|
1110 | 1110 | and remotephases # server supports phases |
|
1111 | 1111 | and pushop.cgresult is None # nothing was pushed |
|
1112 | 1112 | and remotephases.get('publishing', False)): |
|
1113 | 1113 | # When: |
|
1114 | 1114 | # - this is a subrepo push |
|
1115 | 1115 | # - and the remote supports phases

1116 | 1116 | # - and no changeset was pushed

1117 | 1117 | # - and the remote is publishing

1118 | 1118 | # We may be in the issue 3871 case!

1119 | 1119 | # We drop the phase synchronisation that would otherwise be

1120 | 1120 | # done as a courtesy, to avoid publishing changesets that may

1121 | 1121 | # still be draft on the remote.
|
1122 | 1122 | remotephases = {'publishing': 'True'} |
|
1123 | 1123 | if not remotephases: # old server or public-only reply from non-publishing
|
1124 | 1124 | _localphasemove(pushop, cheads) |
|
1125 | 1125 | # don't push any phase data as there is nothing to push |
|
1126 | 1126 | else: |
|
1127 | 1127 | ana = phases.analyzeremotephases(pushop.repo, cheads, |
|
1128 | 1128 | remotephases) |
|
1129 | 1129 | pheads, droots = ana |
|
1130 | 1130 | ### Apply remote phase on local |
|
1131 | 1131 | if remotephases.get('publishing', False): |
|
1132 | 1132 | _localphasemove(pushop, cheads) |
|
1133 | 1133 | else: # publish = False |
|
1134 | 1134 | _localphasemove(pushop, pheads) |
|
1135 | 1135 | _localphasemove(pushop, cheads, phases.draft) |
|
1136 | 1136 | ### Apply local phase on remote |
|
1137 | 1137 | |
|
1138 | 1138 | if pushop.cgresult: |
|
1139 | 1139 | if 'phases' in pushop.stepsdone: |
|
1140 | 1140 | # phases already pushed through bundle2
|
1141 | 1141 | return |
|
1142 | 1142 | outdated = pushop.outdatedphases |
|
1143 | 1143 | else: |
|
1144 | 1144 | outdated = pushop.fallbackoutdatedphases |
|
1145 | 1145 | |
|
1146 | 1146 | pushop.stepsdone.add('phases') |
|
1147 | 1147 | |
|
1148 | 1148 | # filter heads already turned public by the push |
|
1149 | 1149 | outdated = [c for c in outdated if c.node() not in pheads] |
|
1150 | 1150 | # fallback to independent pushkey command |
|
1151 | 1151 | for newremotehead in outdated: |
|
1152 | 1152 | r = pushop.remote.pushkey('phases', |
|
1153 | 1153 | newremotehead.hex(), |
|
1154 | 1154 | str(phases.draft), |
|
1155 | 1155 | str(phases.public)) |
|
1156 | 1156 | if not r: |
|
1157 | 1157 | pushop.ui.warn(_('updating %s to public failed!\n') |
|
1158 | 1158 | % newremotehead) |
|
1159 | 1159 | |
|
1160 | 1160 | def _localphasemove(pushop, nodes, phase=phases.public): |
|
1161 | 1161 | """move <nodes> to <phase> in the local source repo""" |
|
1162 | 1162 | if pushop.trmanager: |
|
1163 | 1163 | phases.advanceboundary(pushop.repo, |
|
1164 | 1164 | pushop.trmanager.transaction(), |
|
1165 | 1165 | phase, |
|
1166 | 1166 | nodes) |
|
1167 | 1167 | else: |
|
1168 | 1168 | # repo is not locked, do not change any phases! |
|
1169 | 1169 | # Informs the user that phases should have been moved when |
|
1170 | 1170 | # applicable. |
|
1171 | 1171 | actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()] |
|
1172 | 1172 | phasestr = phases.phasenames[phase] |
|
1173 | 1173 | if actualmoves: |
|
1174 | 1174 | pushop.ui.status(_('cannot lock source repo, skipping ' |
|
1175 | 1175 | 'local %s phase update\n') % phasestr) |
|
1176 | 1176 | |
|
1177 | 1177 | def _pushobsolete(pushop): |
|
1178 | 1178 | """utility function to push obsolete markers to a remote""" |
|
1179 | 1179 | if 'obsmarkers' in pushop.stepsdone: |
|
1180 | 1180 | return |
|
1181 | 1181 | repo = pushop.repo |
|
1182 | 1182 | remote = pushop.remote |
|
1183 | 1183 | pushop.stepsdone.add('obsmarkers') |
|
1184 | 1184 | if pushop.outobsmarkers: |
|
1185 | 1185 | pushop.ui.debug('try to push obsolete markers to remote\n') |
|
1186 | 1186 | rslts = [] |
|
1187 | 1187 | remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers)) |
|
1188 | 1188 | for key in sorted(remotedata, reverse=True): |
|
1189 | 1189 | # reverse sort to ensure we end with dump0 |
|
1190 | 1190 | data = remotedata[key] |
|
1191 | 1191 | rslts.append(remote.pushkey('obsolete', key, '', data)) |
|
1192 | 1192 | if [r for r in rslts if not r]: |
|
1193 | 1193 | msg = _('failed to push some obsolete markers!\n') |
|
1194 | 1194 | repo.ui.warn(msg) |
|
1195 | 1195 | |
|
1196 | 1196 | def _pushbookmark(pushop): |
|
1197 | 1197 | """Update bookmark position on remote""" |
|
1198 | 1198 | if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone: |
|
1199 | 1199 | return |
|
1200 | 1200 | pushop.stepsdone.add('bookmarks') |
|
1201 | 1201 | ui = pushop.ui |
|
1202 | 1202 | remote = pushop.remote |
|
1203 | 1203 | |
|
1204 | 1204 | for b, old, new in pushop.outbookmarks: |
|
1205 | 1205 | action = 'update' |
|
1206 | 1206 | if not old: |
|
1207 | 1207 | action = 'export' |
|
1208 | 1208 | elif not new: |
|
1209 | 1209 | action = 'delete' |
|
1210 | 1210 | if remote.pushkey('bookmarks', b, old, new): |
|
1211 | 1211 | ui.status(bookmsgmap[action][0] % b) |
|
1212 | 1212 | else: |
|
1213 | 1213 | ui.warn(bookmsgmap[action][1] % b) |
|
1214 | 1214 | # discovery can have set the value from an invalid entry
|
1215 | 1215 | if pushop.bkresult is not None: |
|
1216 | 1216 | pushop.bkresult = 1 |
|
1217 | 1217 | |
|
1218 | 1218 | class pulloperation(object): |
|
1219 | 1219 | """A object that represent a single pull operation |
|
1220 | 1220 | |
|
1221 | 1221 | Its purpose is to carry pull-related state and very common operations.
|
1222 | 1222 | |
|
1223 | 1223 | A new one should be created at the beginning of each pull and discarded
|
1224 | 1224 | afterward. |
|
1225 | 1225 | """ |
|
1226 | 1226 | |
|
1227 | 1227 | def __init__(self, repo, remote, heads=None, force=False, bookmarks=(), |
|
1228 | 1228 | remotebookmarks=None, streamclonerequested=None): |
|
1229 | 1229 | # repo we pull into |
|
1230 | 1230 | self.repo = repo |
|
1231 | 1231 | # repo we pull from |
|
1232 | 1232 | self.remote = remote |
|
1233 | 1233 | # revision we try to pull (None is "all") |
|
1234 | 1234 | self.heads = heads |
|
1235 | 1235 | # bookmarks pulled explicitly
|
1236 | 1236 | self.explicitbookmarks = [repo._bookmarks.expandname(bookmark) |
|
1237 | 1237 | for bookmark in bookmarks] |
|
1238 | 1238 | # do we force pull? |
|
1239 | 1239 | self.force = force |
|
1240 | 1240 | # whether a streaming clone was requested |
|
1241 | 1241 | self.streamclonerequested = streamclonerequested |
|
1242 | 1242 | # transaction manager |
|
1243 | 1243 | self.trmanager = None |
|
1244 | 1244 | # set of common changesets between local and remote before the pull

1245 | 1245 | self.common = None

1246 | 1246 | # set of pulled heads

1247 | 1247 | self.rheads = None

1248 | 1248 | # list of missing changesets to fetch remotely
|
1249 | 1249 | self.fetch = None |
|
1250 | 1250 | # remote bookmarks data |
|
1251 | 1251 | self.remotebookmarks = remotebookmarks |
|
1252 | 1252 | # result of changegroup pulling (used as return code by pull) |
|
1253 | 1253 | self.cgresult = None |
|
1254 | 1254 | # list of steps already done
|
1255 | 1255 | self.stepsdone = set() |
|
1256 | 1256 | # Whether we attempted a clone from pre-generated bundles. |
|
1257 | 1257 | self.clonebundleattempted = False |
|
1258 | 1258 | |
|
1259 | 1259 | @util.propertycache |
|
1260 | 1260 | def pulledsubset(self): |
|
1261 | 1261 | """heads of the set of changeset target by the pull""" |
|
1262 | 1262 | # compute target subset |
|
1263 | 1263 | if self.heads is None: |
|
1264 | 1264 | # We pulled everything possible
|
1265 | 1265 | # sync on everything common |
|
1266 | 1266 | c = set(self.common) |
|
1267 | 1267 | ret = list(self.common) |
|
1268 | 1268 | for n in self.rheads: |
|
1269 | 1269 | if n not in c: |
|
1270 | 1270 | ret.append(n) |
|
1271 | 1271 | return ret |
|
1272 | 1272 | else: |
|
1273 | 1273 | # We pulled a specific subset |
|
1274 | 1274 | # sync on this subset |
|
1275 | 1275 | return self.heads |
|
1276 | 1276 | |
|
1277 | 1277 | @util.propertycache |
|
1278 | 1278 | def canusebundle2(self): |
|
1279 | 1279 | return not _forcebundle1(self) |
|
1280 | 1280 | |
|
1281 | 1281 | @util.propertycache |
|
1282 | 1282 | def remotebundle2caps(self): |
|
1283 | 1283 | return bundle2.bundle2caps(self.remote) |
|
1284 | 1284 | |
|
1285 | 1285 | def gettransaction(self): |
|
1286 | 1286 | # deprecated; talk to trmanager directly |
|
1287 | 1287 | return self.trmanager.transaction() |
|
1288 | 1288 | |
|
1289 | 1289 | class transactionmanager(util.transactional): |
|
1290 | 1290 | """An object to manage the life cycle of a transaction |
|
1291 | 1291 | |
|
1292 | 1292 | It creates the transaction on demand and calls the appropriate hooks when |
|
1293 | 1293 | closing the transaction.""" |
|
1294 | 1294 | def __init__(self, repo, source, url): |
|
1295 | 1295 | self.repo = repo |
|
1296 | 1296 | self.source = source |
|
1297 | 1297 | self.url = url |
|
1298 | 1298 | self._tr = None |
|
1299 | 1299 | |
|
1300 | 1300 | def transaction(self): |
|
1301 | 1301 | """Return an open transaction object, constructing if necessary""" |
|
1302 | 1302 | if not self._tr: |
|
1303 | 1303 | trname = '%s\n%s' % (self.source, util.hidepassword(self.url)) |
|
1304 | 1304 | self._tr = self.repo.transaction(trname) |
|
1305 | 1305 | self._tr.hookargs['source'] = self.source |
|
1306 | 1306 | self._tr.hookargs['url'] = self.url |
|
1307 | 1307 | return self._tr |
|
1308 | 1308 | |
|
1309 | 1309 | def close(self): |
|
1310 | 1310 | """close transaction if created""" |
|
1311 | 1311 | if self._tr is not None: |
|
1312 | 1312 | self._tr.close() |
|
1313 | 1313 | |
|
1314 | 1314 | def release(self): |
|
1315 | 1315 | """release transaction if created""" |
|
1316 | 1316 | if self._tr is not None: |
|
1317 | 1317 | self._tr.release() |
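# Editorial sketch (not part of the original file): because the class
# derives from util.transactional it can drive a with-statement -- on a
# clean exit close() commits the transaction (if one was ever created),
# on an exception release() rolls it back. pull() below relies on
# exactly this behaviour:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()   # opened lazily, on first use
#       ...                            # apply incoming data under tr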
|
1318 | 1318 | |
|
1319 | 1319 | def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None, |
|
1320 | 1320 | streamclonerequested=None): |
|
1321 | 1321 | """Fetch repository data from a remote. |
|
1322 | 1322 | |
|
1323 | 1323 | This is the main function used to retrieve data from a remote repository. |
|
1324 | 1324 | |
|
1325 | 1325 | ``repo`` is the local repository to clone into. |
|
1326 | 1326 | ``remote`` is a peer instance. |
|
1327 | 1327 | ``heads`` is an iterable of revisions we want to pull. ``None`` (the |
|
1328 | 1328 | default) means to pull everything from the remote. |
|
1329 | 1329 | ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By |
|
1330 | 1330 | default, all remote bookmarks are pulled. |
|
1331 | 1331 | ``opargs`` are additional keyword arguments to pass to ``pulloperation`` |
|
1332 | 1332 | initialization. |
|
1333 | 1333 | ``streamclonerequested`` is a boolean indicating whether a "streaming |
|
1334 | 1334 | clone" is requested. A "streaming clone" is essentially a raw file copy |
|
1335 | 1335 | of revlogs from the server. This only works when the local repository is |
|
1336 | 1336 | empty. The default value of ``None`` means to respect the server |
|
1337 | 1337 | configuration for preferring stream clones. |
|
1338 | 1338 | |
|
1339 | 1339 | Returns the ``pulloperation`` created for this pull. |
|
1340 | 1340 | """ |
|
1341 | 1341 | if opargs is None: |
|
1342 | 1342 | opargs = {} |
|
1343 | 1343 | pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks, |
|
1344 | 1344 | streamclonerequested=streamclonerequested, |
|
1345 | 1345 | **pycompat.strkwargs(opargs)) |
|
1346 | 1346 | |
|
1347 | 1347 | peerlocal = pullop.remote.local() |
|
1348 | 1348 | if peerlocal: |
|
1349 | 1349 | missing = set(peerlocal.requirements) - pullop.repo.supported |
|
1350 | 1350 | if missing: |
|
1351 | 1351 | msg = _("required features are not" |
|
1352 | 1352 | " supported in the destination:" |
|
1353 | 1353 | " %s") % (', '.join(sorted(missing))) |
|
1354 | 1354 | raise error.Abort(msg) |
|
1355 | 1355 | |
|
1356 | 1356 | pullop.trmanager = transactionmanager(repo, 'pull', remote.url()) |
|
1357 | 1357 | with repo.wlock(), repo.lock(), pullop.trmanager: |
|
1358 | 1358 | # This should ideally be in _pullbundle2(). However, it needs to run |
|
1359 | 1359 | # before discovery to avoid extra work. |
|
1360 | 1360 | _maybeapplyclonebundle(pullop) |
|
1361 | 1361 | streamclone.maybeperformlegacystreamclone(pullop) |
|
1362 | 1362 | _pulldiscovery(pullop) |
|
1363 | 1363 | if pullop.canusebundle2: |
|
1364 | 1364 | _pullbundle2(pullop) |
|
1365 | 1365 | _pullchangeset(pullop) |
|
1366 | 1366 | _pullphase(pullop) |
|
1367 | 1367 | _pullbookmarks(pullop) |
|
1368 | 1368 | _pullobsolete(pullop) |
|
1369 | 1369 | |
|
1370 | 1370 | # storing remotenames |
|
1371 | 1371 | if repo.ui.configbool('experimental', 'remotenames'): |
|
1372 | 1372 | logexchange.pullremotenames(repo, remote) |
|
1373 | 1373 | |
|
1374 | 1374 | return pullop |
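# Editorial sketch (not part of the original file): a minimal caller of
# pull(). 'other' is assumed to be a peer from hg.peer(); the URL is
# hypothetical and heads=None requests everything, matching the
# docstring above.
#
#   from mercurial import hg
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pullop = pull(repo, other, heads=None)
#   if pullop.cgresult == 0:
#       repo.ui.status('no changes found\n')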
|
1375 | 1375 | |
|
1376 | 1376 | # list of steps to perform discovery before pull |
|
1377 | 1377 | pulldiscoveryorder = [] |
|
1378 | 1378 | |
|
1379 | 1379 | # Mapping between step name and function |
|
1380 | 1380 | # |
|
1381 | 1381 | # This exists to help extensions wrap steps if necessary |
|
1382 | 1382 | pulldiscoverymapping = {} |
|
1383 | 1383 | |
|
1384 | 1384 | def pulldiscovery(stepname): |
|
1385 | 1385 | """decorator for function performing discovery before pull |
|
1386 | 1386 | |
|
1387 | 1387 | The function is added to the step -> function mapping and appended to the |
|
1388 | 1388 | list of steps. Beware that decorated functions will be added in order (this
|
1389 | 1389 | may matter). |
|
1390 | 1390 | |
|
1391 | 1391 | You can only use this decorator for a new step; if you want to wrap a step

1392 | 1392 | from an extension, change the pulldiscoverymapping dictionary directly."""
|
1393 | 1393 | def dec(func): |
|
1394 | 1394 | assert stepname not in pulldiscoverymapping |
|
1395 | 1395 | pulldiscoverymapping[stepname] = func |
|
1396 | 1396 | pulldiscoveryorder.append(stepname) |
|
1397 | 1397 | return func |
|
1398 | 1398 | return dec |
|
1399 | 1399 | |
|
1400 | 1400 | def _pulldiscovery(pullop): |
|
1401 | 1401 | """Run all discovery steps""" |
|
1402 | 1402 | for stepname in pulldiscoveryorder: |
|
1403 | 1403 | step = pulldiscoverymapping[stepname] |
|
1404 | 1404 | step(pullop) |
|
1405 | 1405 | |
|
1406 | 1406 | @pulldiscovery('b1:bookmarks') |
|
1407 | 1407 | def _pullbookmarkbundle1(pullop): |
|
1408 | 1408 | """fetch bookmark data in bundle1 case |
|
1409 | 1409 | |
|
1410 | 1410 | If not using bundle2, we have to fetch bookmarks before changeset |
|
1411 | 1411 | discovery to reduce the chance and impact of race conditions.""" |
|
1412 | 1412 | if pullop.remotebookmarks is not None: |
|
1413 | 1413 | return |
|
1414 | 1414 | if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps: |
|
1415 | 1415 | # all known bundle2 servers now support listkeys, but let's be nice with

1416 | 1416 | # new implementations.
|
1417 | 1417 | return |
|
1418 | 1418 | books = pullop.remote.listkeys('bookmarks') |
|
1419 | 1419 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(books) |
|
1420 | 1420 | |
|
1421 | 1421 | |
|
1422 | 1422 | @pulldiscovery('changegroup') |
|
1423 | 1423 | def _pulldiscoverychangegroup(pullop): |
|
1424 | 1424 | """discovery phase for the pull |
|
1425 | 1425 | |
|
1426 | 1426 | Currently handles changeset discovery only; will be changed to handle all

1427 | 1427 | discovery at some point."""
|
1428 | 1428 | tmp = discovery.findcommonincoming(pullop.repo, |
|
1429 | 1429 | pullop.remote, |
|
1430 | 1430 | heads=pullop.heads, |
|
1431 | 1431 | force=pullop.force) |
|
1432 | 1432 | common, fetch, rheads = tmp |
|
1433 | 1433 | nm = pullop.repo.unfiltered().changelog.nodemap |
|
1434 | 1434 | if fetch and rheads: |
|
1435 | 1435 | # If a remote head is filtered locally, put it back in common.

1436 | 1436 | #

1437 | 1437 | # This is a hackish solution to catch most "common but locally

1438 | 1438 | # hidden" situations. We do not perform discovery on the unfiltered

1439 | 1439 | # repository because it ends up doing a pathological number of round

1440 | 1440 | # trips for a huge amount of changesets we do not care about.

1441 | 1441 | #

1442 | 1442 | # If a set of such "common but filtered" changesets exists on the server

1443 | 1443 | # but does not include a remote head, we'll not be able to detect it,
|
1444 | 1444 | scommon = set(common) |
|
1445 | 1445 | for n in rheads: |
|
1446 | 1446 | if n in nm: |
|
1447 | 1447 | if n not in scommon: |
|
1448 | 1448 | common.append(n) |
|
1449 | 1449 | if set(rheads).issubset(set(common)): |
|
1450 | 1450 | fetch = [] |
|
1451 | 1451 | pullop.common = common |
|
1452 | 1452 | pullop.fetch = fetch |
|
1453 | 1453 | pullop.rheads = rheads |
|
1454 | 1454 | |
|
1455 | 1455 | def _pullbundle2(pullop): |
|
1456 | 1456 | """pull data using bundle2 |
|
1457 | 1457 | |
|
1458 | 1458 | For now, the only supported data are changegroup.""" |
|
1459 | 1459 | kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')} |
|
1460 | 1460 | |
|
1461 | 1461 | # make ui easier to access |
|
1462 | 1462 | ui = pullop.repo.ui |
|
1463 | 1463 | |
|
1464 | 1464 | # At the moment we don't do stream clones over bundle2. If that is |
|
1465 | 1465 | # implemented then here's where the check for that will go. |
|
1466 | 1466 | streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0] |
|
1467 | 1467 | |
|
1468 | 1468 | # declare pull perimeters |
|
1469 | 1469 | kwargs['common'] = pullop.common |
|
1470 | 1470 | kwargs['heads'] = pullop.heads or pullop.rheads |
|
1471 | 1471 | |
|
1472 | 1472 | if streaming: |
|
1473 | 1473 | kwargs['cg'] = False |
|
1474 | 1474 | kwargs['stream'] = True |
|
1475 | 1475 | pullop.stepsdone.add('changegroup') |
|
1476 | 1476 | pullop.stepsdone.add('phases') |
|
1477 | 1477 | |
|
1478 | 1478 | else: |
|
1479 | 1479 | # pulling changegroup |
|
1480 | 1480 | pullop.stepsdone.add('changegroup') |
|
1481 | 1481 | |
|
1482 | 1482 | kwargs['cg'] = pullop.fetch |
|
1483 | 1483 | |
|
1484 | 1484 | legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange') |
|
1485 | 1485 | hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ()) |
|
1486 | 1486 | if (not legacyphase and hasbinaryphase): |
|
1487 | 1487 | kwargs['phases'] = True |
|
1488 | 1488 | pullop.stepsdone.add('phases') |
|
1489 | 1489 | |
|
1490 | 1490 | if 'listkeys' in pullop.remotebundle2caps: |
|
1491 | 1491 | if 'phases' not in pullop.stepsdone: |
|
1492 | 1492 | kwargs['listkeys'] = ['phases'] |
|
1493 | 1493 | |
|
1494 | 1494 | bookmarksrequested = False |
|
1495 | 1495 | legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange') |
|
1496 | 1496 | hasbinarybook = 'bookmarks' in pullop.remotebundle2caps |
|
1497 | 1497 | |
|
1498 | 1498 | if pullop.remotebookmarks is not None: |
|
1499 | 1499 | pullop.stepsdone.add('request-bookmarks') |
|
1500 | 1500 | |
|
1501 | 1501 | if ('request-bookmarks' not in pullop.stepsdone |
|
1502 | 1502 | and pullop.remotebookmarks is None |
|
1503 | 1503 | and not legacybookmark and hasbinarybook): |
|
1504 | 1504 | kwargs['bookmarks'] = True |
|
1505 | 1505 | bookmarksrequested = True |
|
1506 | 1506 | |
|
1507 | 1507 | if 'listkeys' in pullop.remotebundle2caps: |
|
1508 | 1508 | if 'request-bookmarks' not in pullop.stepsdone: |
|
1509 | 1509 | # make sure to always include bookmark data when migrating
|
1510 | 1510 | # `hg incoming --bundle` to using this function. |
|
1511 | 1511 | pullop.stepsdone.add('request-bookmarks') |
|
1512 | 1512 | kwargs.setdefault('listkeys', []).append('bookmarks') |
|
1513 | 1513 | |
|
1514 | 1514 | # If this is a full pull / clone and the server supports the clone bundles |
|
1515 | 1515 | # feature, tell the server whether we attempted a clone bundle. The |
|
1516 | 1516 | # presence of this flag indicates the client supports clone bundles. This |
|
1517 | 1517 | # will enable the server to treat clients that support clone bundles |
|
1518 | 1518 | # differently from those that don't. |
|
1519 | 1519 | if (pullop.remote.capable('clonebundles') |
|
1520 | 1520 | and pullop.heads is None and list(pullop.common) == [nullid]): |
|
1521 | 1521 | kwargs['cbattempted'] = pullop.clonebundleattempted |
|
1522 | 1522 | |
|
1523 | 1523 | if streaming: |
|
1524 | 1524 | pullop.repo.ui.status(_('streaming all changes\n')) |
|
1525 | 1525 | elif not pullop.fetch: |
|
1526 | 1526 | pullop.repo.ui.status(_("no changes found\n")) |
|
1527 | 1527 | pullop.cgresult = 0 |
|
1528 | 1528 | else: |
|
1529 | 1529 | if pullop.heads is None and list(pullop.common) == [nullid]: |
|
1530 | 1530 | pullop.repo.ui.status(_("requesting all changes\n")) |
|
1531 | 1531 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
1532 | 1532 | remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps) |
|
1533 | 1533 | if obsolete.commonversion(remoteversions) is not None: |
|
1534 | 1534 | kwargs['obsmarkers'] = True |
|
1535 | 1535 | pullop.stepsdone.add('obsmarkers') |
|
1536 | 1536 | _pullbundle2extraprepare(pullop, kwargs) |
|
1537 | 1537 | bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs)) |
|
1538 | 1538 | try: |
|
1539 | 1539 | op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction) |
|
1540 | 1540 | op.modes['bookmarks'] = 'records' |
|
1541 | 1541 | bundle2.processbundle(pullop.repo, bundle, op=op) |
|
1542 | 1542 | except bundle2.AbortFromPart as exc: |
|
1543 | 1543 | pullop.repo.ui.status(_('remote: abort: %s\n') % exc) |
|
1544 | 1544 | raise error.Abort(_('pull failed on remote'), hint=exc.hint) |
|
1545 | 1545 | except error.BundleValueError as exc: |
|
1546 | 1546 | raise error.Abort(_('missing support for %s') % exc) |
|
1547 | 1547 | |
|
1548 | 1548 | if pullop.fetch: |
|
1549 | 1549 | pullop.cgresult = bundle2.combinechangegroupresults(op) |
|
1550 | 1550 | |
|
1551 | 1551 | # processing phases change |
|
1552 | 1552 | for namespace, value in op.records['listkeys']: |
|
1553 | 1553 | if namespace == 'phases': |
|
1554 | 1554 | _pullapplyphases(pullop, value) |
|
1555 | 1555 | |
|
1556 | 1556 | # processing bookmark update |
|
1557 | 1557 | if bookmarksrequested: |
|
1558 | 1558 | books = {} |
|
1559 | 1559 | for record in op.records['bookmarks']: |
|
1560 | 1560 | books[record['bookmark']] = record["node"] |
|
1561 | 1561 | pullop.remotebookmarks = books |
|
1562 | 1562 | else: |
|
1563 | 1563 | for namespace, value in op.records['listkeys']: |
|
1564 | 1564 | if namespace == 'bookmarks': |
|
1565 | 1565 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(value) |
|
1566 | 1566 | |
|
1567 | 1567 | # bookmark data were either already there or pulled in the bundle |
|
1568 | 1568 | if pullop.remotebookmarks is not None: |
|
1569 | 1569 | _pullbookmarks(pullop) |
|
1570 | 1570 | |
|
1571 | 1571 | def _pullbundle2extraprepare(pullop, kwargs): |
|
1572 | 1572 | """hook function so that extensions can extend the getbundle call""" |
|
1573 | 1573 | |
|
1574 | 1574 | def _pullchangeset(pullop): |
|
1575 | 1575 | """pull changeset from unbundle into the local repo""" |
|
1576 | 1576 | # We delay opening the transaction as late as possible so we

1577 | 1577 | # don't open a transaction for nothing and don't break future useful

1578 | 1578 | # rollback calls
|
1579 | 1579 | if 'changegroup' in pullop.stepsdone: |
|
1580 | 1580 | return |
|
1581 | 1581 | pullop.stepsdone.add('changegroup') |
|
1582 | 1582 | if not pullop.fetch: |
|
1583 | 1583 | pullop.repo.ui.status(_("no changes found\n")) |
|
1584 | 1584 | pullop.cgresult = 0 |
|
1585 | 1585 | return |
|
1586 | 1586 | tr = pullop.gettransaction() |
|
1587 | 1587 | if pullop.heads is None and list(pullop.common) == [nullid]: |
|
1588 | 1588 | pullop.repo.ui.status(_("requesting all changes\n")) |
|
1589 | 1589 | elif pullop.heads is None and pullop.remote.capable('changegroupsubset'): |
|
1590 | 1590 | # issue1320, avoid a race if remote changed after discovery |
|
1591 | 1591 | pullop.heads = pullop.rheads |
|
1592 | 1592 | |
|
1593 | 1593 | if pullop.remote.capable('getbundle'): |
|
1594 | 1594 | # TODO: get bundlecaps from remote |
|
1595 | 1595 | cg = pullop.remote.getbundle('pull', common=pullop.common, |
|
1596 | 1596 | heads=pullop.heads or pullop.rheads) |
|
1597 | 1597 | elif pullop.heads is None: |
|
1598 | 1598 | cg = pullop.remote.changegroup(pullop.fetch, 'pull') |
|
1599 | 1599 | elif not pullop.remote.capable('changegroupsubset'): |
|
1600 | 1600 | raise error.Abort(_("partial pull cannot be done because " |
|
1601 | 1601 | "other repository doesn't support " |
|
1602 | 1602 | "changegroupsubset.")) |
|
1603 | 1603 | else: |
|
1604 | 1604 | cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull') |
|
1605 | 1605 | bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull', |
|
1606 | 1606 | pullop.remote.url()) |
|
1607 | 1607 | pullop.cgresult = bundle2.combinechangegroupresults(bundleop) |
|
1608 | 1608 | |
|
1609 | 1609 | def _pullphase(pullop): |
|
1610 | 1610 | # Get remote phases data from remote |
|
1611 | 1611 | if 'phases' in pullop.stepsdone: |
|
1612 | 1612 | return |
|
1613 | 1613 | remotephases = pullop.remote.listkeys('phases') |
|
1614 | 1614 | _pullapplyphases(pullop, remotephases) |
|
1615 | 1615 | |
|
1616 | 1616 | def _pullapplyphases(pullop, remotephases): |
|
1617 | 1617 | """apply phase movement from observed remote state""" |
|
1618 | 1618 | if 'phases' in pullop.stepsdone: |
|
1619 | 1619 | return |
|
1620 | 1620 | pullop.stepsdone.add('phases') |
|
1621 | 1621 | publishing = bool(remotephases.get('publishing', False)) |
|
1622 | 1622 | if remotephases and not publishing: |
|
1623 | 1623 | # remote is new and non-publishing |
|
1624 | 1624 | pheads, _dr = phases.analyzeremotephases(pullop.repo, |
|
1625 | 1625 | pullop.pulledsubset, |
|
1626 | 1626 | remotephases) |
|
1627 | 1627 | dheads = pullop.pulledsubset |
|
1628 | 1628 | else: |
|
1629 | 1629 | # Remote is old or publishing; all common changesets
|
1630 | 1630 | # should be seen as public |
|
1631 | 1631 | pheads = pullop.pulledsubset |
|
1632 | 1632 | dheads = [] |
|
1633 | 1633 | unfi = pullop.repo.unfiltered() |
|
1634 | 1634 | phase = unfi._phasecache.phase |
|
1635 | 1635 | rev = unfi.changelog.nodemap.get |
|
1636 | 1636 | public = phases.public |
|
1637 | 1637 | draft = phases.draft |
|
1638 | 1638 | |
|
1639 | 1639 | # exclude changesets already public locally and update the others |
|
1640 | 1640 | pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public] |
|
1641 | 1641 | if pheads: |
|
1642 | 1642 | tr = pullop.gettransaction() |
|
1643 | 1643 | phases.advanceboundary(pullop.repo, tr, public, pheads) |
|
1644 | 1644 | |
|
1645 | 1645 | # exclude changesets already draft locally and update the others |
|
1646 | 1646 | dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft] |
|
1647 | 1647 | if dheads: |
|
1648 | 1648 | tr = pullop.gettransaction() |
|
1649 | 1649 | phases.advanceboundary(pullop.repo, tr, draft, dheads) |
|
1650 | 1650 | |
|
1651 | 1651 | def _pullbookmarks(pullop): |
|
1652 | 1652 | """process the remote bookmark information to update the local one""" |
|
1653 | 1653 | if 'bookmarks' in pullop.stepsdone: |
|
1654 | 1654 | return |
|
1655 | 1655 | pullop.stepsdone.add('bookmarks') |
|
1656 | 1656 | repo = pullop.repo |
|
1657 | 1657 | remotebookmarks = pullop.remotebookmarks |
|
1658 | 1658 | bookmod.updatefromremote(repo.ui, repo, remotebookmarks, |
|
1659 | 1659 | pullop.remote.url(), |
|
1660 | 1660 | pullop.gettransaction, |
|
1661 | 1661 | explicit=pullop.explicitbookmarks) |
|
1662 | 1662 | |
|
1663 | 1663 | def _pullobsolete(pullop): |
|
1664 | 1664 | """utility function to pull obsolete markers from a remote |
|
1665 | 1665 | |
|
1666 | 1666 | The `gettransaction` is a function that returns the pull transaction, creating

1667 | 1667 | one if necessary. We return the transaction to inform the calling code that

1668 | 1668 | a new transaction has been created (when applicable).

1669 | 1669 |

1670 | 1670 | Exists mostly to allow overriding for experimentation purposes"""
|
1671 | 1671 | if 'obsmarkers' in pullop.stepsdone: |
|
1672 | 1672 | return |
|
1673 | 1673 | pullop.stepsdone.add('obsmarkers') |
|
1674 | 1674 | tr = None |
|
1675 | 1675 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
1676 | 1676 | pullop.repo.ui.debug('fetching remote obsolete markers\n') |
|
1677 | 1677 | remoteobs = pullop.remote.listkeys('obsolete') |
|
1678 | 1678 | if 'dump0' in remoteobs: |
|
1679 | 1679 | tr = pullop.gettransaction() |
|
1680 | 1680 | markers = [] |
|
1681 | 1681 | for key in sorted(remoteobs, reverse=True): |
|
1682 | 1682 | if key.startswith('dump'): |
|
1683 | 1683 | data = util.b85decode(remoteobs[key]) |
|
1684 | 1684 | version, newmarks = obsolete._readmarkers(data) |
|
1685 | 1685 | markers += newmarks |
|
1686 | 1686 | if markers: |
|
1687 | 1687 | pullop.repo.obsstore.add(tr, markers) |
|
1688 | 1688 | pullop.repo.invalidatevolatilesets() |
|
1689 | 1689 | return tr |
|
1690 | 1690 | |
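
For reference, the listkeys payload consumed above has roughly this shape
(values are placeholders): the 'obsolete' namespace maps keys 'dump0',
'dump1', ... to base85-encoded marker blobs.

# remoteobs = {'dump0': '<base85 blob>', 'dump1': '<base85 blob>'}
# each value decodes with util.b85decode() and parses with
# obsolete._readmarkers() into (version, markers)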
|
1691 | 1691 | def caps20to10(repo, role): |
|
1692 | 1692 | """return a set with appropriate options to use bundle20 during getbundle""" |
|
1693 | 1693 | caps = {'HG20'} |
|
1694 | 1694 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role)) |
|
1695 | 1695 | caps.add('bundle2=' + urlreq.quote(capsblob)) |
|
1696 | 1696 | return caps |
|
1697 | 1697 | |
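
The returned set has the shape sketched below; the encoded bundle2 capability
blob depends on the repository and is abbreviated here:

# caps20to10(repo, role='client') ->
#     {'HG20',
#      'bundle2=HG20%0Achangegroup%3D01%2C02%0A...'}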
|
1698 | 1698 | # List of names of steps to perform for a bundle2 for getbundle, order matters. |
|
1699 | 1699 | getbundle2partsorder = [] |
|
1700 | 1700 | |
|
1701 | 1701 | # Mapping between step name and function |
|
1702 | 1702 | # |
|
1703 | 1703 | # This exists to help extensions wrap steps if necessary |
|
1704 | 1704 | getbundle2partsmapping = {} |
|
1705 | 1705 | |
|
1706 | 1706 | def getbundle2partsgenerator(stepname, idx=None): |
|
1707 | 1707 | """decorator for function generating bundle2 part for getbundle |
|
1708 | 1708 | |
|
1709 | 1709 | The function is added to the step -> function mapping and appended to the |
|
1710 | 1710 | list of steps. Beware that decorated functions will be added in order |
|
1711 | 1711 | (this may matter). |
|
1712 | 1712 | |
|
1713 | 1713 | You can only use this decorator for new steps; if you want to wrap a step

1714 | 1714 | from an extension, modify the getbundle2partsmapping dictionary directly."""
|
1715 | 1715 | def dec(func): |
|
1716 | 1716 | assert stepname not in getbundle2partsmapping |
|
1717 | 1717 | getbundle2partsmapping[stepname] = func |
|
1718 | 1718 | if idx is None: |
|
1719 | 1719 | getbundle2partsorder.append(stepname) |
|
1720 | 1720 | else: |
|
1721 | 1721 | getbundle2partsorder.insert(idx, stepname) |
|
1722 | 1722 | return func |
|
1723 | 1723 | return dec |
|
1724 | 1724 | |
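
As a usage sketch, an extension could register an additional step; the step
name, getbundle argument, and payload below are hypothetical:

@getbundle2partsgenerator('exp-myfeature')
def _getbundlemyfeaturepart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    # only emit the part when the client asked for it via the
    # (hypothetical) 'exp_myfeature' getbundle argument
    if kwargs.get(r'exp_myfeature', False):
        bundler.newpart('exp-myfeature', data=repo.root)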
|
1725 | 1725 | def bundle2requested(bundlecaps): |
|
1726 | 1726 | if bundlecaps is not None: |
|
1727 | 1727 | return any(cap.startswith('HG2') for cap in bundlecaps) |
|
1728 | 1728 | return False |
|
1729 | 1729 | |
|
1730 | 1730 | def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None, |
|
1731 | 1731 | **kwargs): |
|
1732 | 1732 | """Return chunks constituting a bundle's raw data. |
|
1733 | 1733 | |
|
1734 | 1734 | Could be a bundle HG10 or a bundle HG20 depending on bundlecaps |
|
1735 | 1735 | passed. |
|
1736 | 1736 | |
|
1737 | 1737 | Returns a 2-tuple of a dict with metadata about the generated bundle |
|
1738 | 1738 | and an iterator over raw chunks (of varying sizes). |
|
1739 | 1739 | """ |
|
1740 | 1740 | kwargs = pycompat.byteskwargs(kwargs) |
|
1741 | 1741 | info = {} |
|
1742 | 1742 | usebundle2 = bundle2requested(bundlecaps) |
|
1743 | 1743 | # bundle10 case |
|
1744 | 1744 | if not usebundle2: |
|
1745 | 1745 | if bundlecaps and not kwargs.get('cg', True): |
|
1746 | 1746 | raise ValueError(_('request for bundle10 must include changegroup')) |
|
1747 | 1747 | |
|
1748 | 1748 | if kwargs: |
|
1749 | 1749 | raise ValueError(_('unsupported getbundle arguments: %s') |
|
1750 | 1750 | % ', '.join(sorted(kwargs.keys()))) |
|
1751 | 1751 | outgoing = _computeoutgoing(repo, heads, common) |
|
1752 | 1752 | info['bundleversion'] = 1 |
|
1753 | 1753 | return info, changegroup.makestream(repo, outgoing, '01', source, |
|
1754 | 1754 | bundlecaps=bundlecaps) |
|
1755 | 1755 | |
|
1756 | 1756 | # bundle20 case |
|
1757 | 1757 | info['bundleversion'] = 2 |
|
1758 | 1758 | b2caps = {} |
|
1759 | 1759 | for bcaps in bundlecaps: |
|
1760 | 1760 | if bcaps.startswith('bundle2='): |
|
1761 | 1761 | blob = urlreq.unquote(bcaps[len('bundle2='):]) |
|
1762 | 1762 | b2caps.update(bundle2.decodecaps(blob)) |
|
1763 | 1763 | bundler = bundle2.bundle20(repo.ui, b2caps) |
|
1764 | 1764 | |
|
1765 | 1765 | kwargs['heads'] = heads |
|
1766 | 1766 | kwargs['common'] = common |
|
1767 | 1767 | |
|
1768 | 1768 | for name in getbundle2partsorder: |
|
1769 | 1769 | func = getbundle2partsmapping[name] |
|
1770 | 1770 | func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps, |
|
1771 | 1771 | **pycompat.strkwargs(kwargs)) |
|
1772 | 1772 | |
|
1773 | 1773 | info['prefercompressed'] = bundler.prefercompressed |
|
1774 | 1774 | |
|
1775 | 1775 | return info, bundler.getchunks() |
|
1776 | 1776 | |
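
A minimal caller sketch, assuming a bundle2-capable client (the 'HG20' cap
selects the bundle20 path above):

info, gen = getbundlechunks(repo, 'pull', heads=None, common=None,
                            bundlecaps={'HG20'})
assert info['bundleversion'] == 2
data = ''.join(gen)  # concatenating the raw chunks yields the bundle payload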
|
1777 | 1777 | @getbundle2partsgenerator('stream2') |
|
1778 | 1778 | def _getbundlestream2(bundler, repo, source, bundlecaps=None, |
|
1779 | 1779 | b2caps=None, heads=None, common=None, **kwargs): |
|
1780 | 1780 | if not kwargs.get('stream', False): |
|
1781 | 1781 | return |
|
1782 | 1782 | |
|
1783 | 1783 | if not streamclone.allowservergeneration(repo): |
|
1784 | 1784 | raise error.Abort(_('stream data requested but server does not allow ' |
|
1785 | 1785 | 'this feature'), |
|
1786 | 1786 | hint=_('well-behaved clients should not be ' |
|
1787 | 1787 | 'requesting stream data from servers not ' |
|
1788 | 1788 | 'advertising it; the client may be buggy')) |
|
1789 | 1789 | |
|
1790 | 1790 | # Stream clones don't compress well. And compression undermines a |
|
1791 | 1791 | # goal of stream clones, which is to be fast. Communicate the desire |
|
1792 | 1792 | # to avoid compression to consumers of the bundle. |
|
1793 | 1793 | bundler.prefercompressed = False |
|
1794 | 1794 | |
|
1795 | 1795 | filecount, bytecount, it = streamclone.generatev2(repo) |
|
1796 | 1796 | requirements = _formatrequirementsspec(repo.requirements) |
|
1797 | 1797 | part = bundler.newpart('stream2', data=it) |
|
1798 | 1798 | part.addparam('bytecount', '%d' % bytecount, mandatory=True) |
|
1799 | 1799 | part.addparam('filecount', '%d' % filecount, mandatory=True) |
|
1800 | 1800 | part.addparam('requirements', requirements, mandatory=True) |
|
1801 | 1801 | |
|
1802 | 1802 | @getbundle2partsgenerator('changegroup') |
|
1803 | 1803 | def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None, |
|
1804 | 1804 | b2caps=None, heads=None, common=None, **kwargs): |
|
1805 | 1805 | """add a changegroup part to the requested bundle""" |
|
1806 | 1806 | cgstream = None |
|
1807 | 1807 | if kwargs.get(r'cg', True): |
|
1808 | 1808 | # build changegroup bundle here. |
|
1809 | 1809 | version = '01' |
|
1810 | 1810 | cgversions = b2caps.get('changegroup') |
|
1811 | 1811 | if cgversions: # 3.1 and 3.2 ship with an empty value |
|
1812 | 1812 | cgversions = [v for v in cgversions |
|
1813 | 1813 | if v in changegroup.supportedoutgoingversions(repo)] |
|
1814 | 1814 | if not cgversions: |
|
1815 | 1815 | raise ValueError(_('no common changegroup version')) |
|
1816 | 1816 | version = max(cgversions) |
|
1817 | 1817 | outgoing = _computeoutgoing(repo, heads, common) |
|
1818 | 1818 | if outgoing.missing: |
|
1819 | 1819 | cgstream = changegroup.makestream(repo, outgoing, version, source, |
|
1820 | 1820 | bundlecaps=bundlecaps) |
|
1821 | 1821 | |
|
1822 | 1822 | if cgstream: |
|
1823 | 1823 | part = bundler.newpart('changegroup', data=cgstream) |
|
1824 | 1824 | if cgversions: |
|
1825 | 1825 | part.addparam('version', version) |
|
1826 | 1826 | part.addparam('nbchanges', '%d' % len(outgoing.missing), |
|
1827 | 1827 | mandatory=False) |
|
1828 | 1828 | if 'treemanifest' in repo.requirements: |
|
1829 | 1829 | part.addparam('treemanifest', '1') |
|
1830 | 1830 | |
|
1831 | 1831 | @getbundle2partsgenerator('bookmarks') |
|
1832 | 1832 | def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None, |
|
1833 | 1833 | b2caps=None, **kwargs): |
|
1834 | 1834 | """add a bookmark part to the requested bundle""" |
|
1835 | 1835 | if not kwargs.get(r'bookmarks', False): |
|
1836 | 1836 | return |
|
1837 | 1837 | if 'bookmarks' not in b2caps: |
|
1838 | 1838 | raise ValueError(_('no common bookmarks exchange method')) |
|
1839 | 1839 | books = bookmod.listbinbookmarks(repo) |
|
1840 | 1840 | data = bookmod.binaryencode(books) |
|
1841 | 1841 | if data: |
|
1842 | 1842 | bundler.newpart('bookmarks', data=data) |
|
1843 | 1843 | |
|
1844 | 1844 | @getbundle2partsgenerator('listkeys') |
|
1845 | 1845 | def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None, |
|
1846 | 1846 | b2caps=None, **kwargs): |
|
1847 | 1847 | """add parts containing listkeys namespaces to the requested bundle""" |
|
1848 | 1848 | listkeys = kwargs.get(r'listkeys', ()) |
|
1849 | 1849 | for namespace in listkeys: |
|
1850 | 1850 | part = bundler.newpart('listkeys') |
|
1851 | 1851 | part.addparam('namespace', namespace) |
|
1852 | 1852 | keys = repo.listkeys(namespace).items() |
|
1853 | 1853 | part.data = pushkey.encodekeys(keys) |
|
1854 | 1854 | |
|
1855 | 1855 | @getbundle2partsgenerator('obsmarkers') |
|
1856 | 1856 | def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None, |
|
1857 | 1857 | b2caps=None, heads=None, **kwargs): |
|
1858 | 1858 | """add an obsolescence markers part to the requested bundle""" |
|
1859 | 1859 | if kwargs.get(r'obsmarkers', False): |
|
1860 | 1860 | if heads is None: |
|
1861 | 1861 | heads = repo.heads() |
|
1862 | 1862 | subset = [c.node() for c in repo.set('::%ln', heads)] |
|
1863 | 1863 | markers = repo.obsstore.relevantmarkers(subset) |
|
1864 | 1864 | markers = sorted(markers) |
|
1865 | 1865 | bundle2.buildobsmarkerspart(bundler, markers) |
|
1866 | 1866 | |
|
1867 | 1867 | @getbundle2partsgenerator('phases') |
|
1868 | 1868 | def _getbundlephasespart(bundler, repo, source, bundlecaps=None, |
|
1869 | 1869 | b2caps=None, heads=None, **kwargs): |
|
1870 | 1870 | """add phase heads part to the requested bundle""" |
|
1871 | 1871 | if kwargs.get(r'phases', False): |
|
1872 | 1872 | if 'heads' not in b2caps.get('phases'):
|
1873 | 1873 | raise ValueError(_('no common phases exchange method')) |
|
1874 | 1874 | if heads is None: |
|
1875 | 1875 | heads = repo.heads() |
|
1876 | 1876 | |
|
1877 | 1877 | headsbyphase = collections.defaultdict(set) |
|
1878 | 1878 | if repo.publishing(): |
|
1879 | 1879 | headsbyphase[phases.public] = heads |
|
1880 | 1880 | else: |
|
1881 | 1881 | # find the appropriate heads to move |
|
1882 | 1882 | |
|
1883 | 1883 | phase = repo._phasecache.phase |
|
1884 | 1884 | node = repo.changelog.node |
|
1885 | 1885 | rev = repo.changelog.rev |
|
1886 | 1886 | for h in heads: |
|
1887 | 1887 | headsbyphase[phase(repo, rev(h))].add(h) |
|
1888 | 1888 | seenphases = list(headsbyphase.keys()) |
|
1889 | 1889 | |
|
1890 | 1890 | # We do not handle anything but public and draft phases for now
|
1891 | 1891 | if seenphases: |
|
1892 | 1892 | assert max(seenphases) <= phases.draft |
|
1893 | 1893 | |
|
1894 | 1894 | # if client is pulling non-public changesets, we need to find |
|
1895 | 1895 | # intermediate public heads. |
|
1896 | 1896 | draftheads = headsbyphase.get(phases.draft, set()) |
|
1897 | 1897 | if draftheads: |
|
1898 | 1898 | publicheads = headsbyphase.get(phases.public, set()) |
|
1899 | 1899 | |
|
1900 | 1900 | revset = 'heads(only(%ln, %ln) and public())' |
|
1901 | 1901 | extraheads = repo.revs(revset, draftheads, publicheads) |
|
1902 | 1902 | for r in extraheads: |
|
1903 | 1903 | headsbyphase[phases.public].add(node(r)) |
|
1904 | 1904 | |
|
1905 | 1905 | # transform data in a format used by the encoding function |
|
1906 | 1906 | phasemapping = [] |
|
1907 | 1907 | for phase in phases.allphases: |
|
1908 | 1908 | phasemapping.append(sorted(headsbyphase[phase])) |
|
1909 | 1909 | |
|
1910 | 1910 | # generate the actual part |
|
1911 | 1911 | phasedata = phases.binaryencode(phasemapping) |
|
1912 | 1912 | bundler.newpart('phase-heads', data=phasedata) |
|
1913 | 1913 | |
|
1914 | 1914 | @getbundle2partsgenerator('hgtagsfnodes') |
|
1915 | 1915 | def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None, |
|
1916 | 1916 | b2caps=None, heads=None, common=None, |
|
1917 | 1917 | **kwargs): |
|
1918 | 1918 | """Transfer the .hgtags filenodes mapping. |
|
1919 | 1919 | |
|
1920 | 1920 | Only values for heads in this bundle will be transferred. |
|
1921 | 1921 | |
|
1922 | 1922 | The part data consists of pairs of 20-byte changeset nodes and .hgtags

1923 | 1923 | filenode raw values.
|
1924 | 1924 | """ |
|
1925 | 1925 | # Don't send unless: |
|
1926 | 1926 | # - changeset are being exchanged, |
|
1927 | 1927 | # - the client supports it. |
|
1928 | 1928 | if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps): |
|
1929 | 1929 | return |
|
1930 | 1930 | |
|
1931 | 1931 | outgoing = _computeoutgoing(repo, heads, common) |
|
1932 | 1932 | bundle2.addparttagsfnodescache(repo, bundler, outgoing) |
|
1933 | 1933 | |
|
1934 | 1934 | def check_heads(repo, their_heads, context): |
|
1935 | 1935 | """check if the heads of a repo have been modified |
|
1936 | 1936 | |
|
1937 | 1937 | Used by peer for unbundling. |
|
1938 | 1938 | """ |
|
1939 | 1939 | heads = repo.heads() |
|
1940 | 1940 | heads_hash = hashlib.sha1(''.join(sorted(heads))).digest() |
|
1941 | 1941 | if not (their_heads == ['force'] or their_heads == heads or |
|
1942 | 1942 | their_heads == ['hashed', heads_hash]): |
|
1943 | 1943 | # someone else committed/pushed/unbundled while we |
|
1944 | 1944 | # were transferring data |
|
1945 | 1945 | raise error.PushRaced('repository changed while %s - ' |
|
1946 | 1946 | 'please try again' % context) |
|
1947 | 1947 | |
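
Clients build the 'hashed' form checked above roughly as follows (a sketch
inferred from this function; remote_heads is a placeholder for the heads
observed during discovery):

heads_hash = hashlib.sha1(''.join(sorted(remote_heads))).digest()
their_heads = ['hashed', heads_hash]  # or the literal heads, or ['force']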
|
1948 | 1948 | def unbundle(repo, cg, heads, source, url): |
|
1949 | 1949 | """Apply a bundle to a repo. |
|
1950 | 1950 | |
|
1951 | 1951 | this function makes sure the repo is locked during the application and has a

1952 | 1952 | mechanism to check that no push race occurred between the creation of the

1953 | 1953 | bundle and its application.

1954 | 1954 |

1955 | 1955 | If the push was raced, a PushRaced exception is raised."""
|
1956 | 1956 | r = 0 |
|
1957 | 1957 | # need a transaction when processing a bundle2 stream |
|
1958 | 1958 | # [wlock, lock, tr] - needs to be an array so nested functions can modify it |
|
1959 | 1959 | lockandtr = [None, None, None] |
|
1960 | 1960 | recordout = None |
|
1961 | 1961 | # quick fix for output mismatch with bundle2 in 3.4 |
|
1962 | 1962 | captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture') |
|
1963 | 1963 | if url.startswith('remote:http:') or url.startswith('remote:https:'): |
|
1964 | 1964 | captureoutput = True |
|
1965 | 1965 | try: |
|
1966 | 1966 | # note: outside bundle1, 'heads' is expected to be empty and this |
|
1967 | 1967 | # 'check_heads' call will be a no-op
|
1968 | 1968 | check_heads(repo, heads, 'uploading changes') |
|
1969 | 1969 | # push can proceed |
|
1970 | 1970 | if not isinstance(cg, bundle2.unbundle20): |
|
1971 | 1971 | # legacy case: bundle1 (changegroup 01) |
|
1972 | 1972 | txnname = "\n".join([source, util.hidepassword(url)]) |
|
1973 | 1973 | with repo.lock(), repo.transaction(txnname) as tr: |
|
1974 | 1974 | op = bundle2.applybundle(repo, cg, tr, source, url) |
|
1975 | 1975 | r = bundle2.combinechangegroupresults(op) |
|
1976 | 1976 | else: |
|
1977 | 1977 | r = None |
|
1978 | 1978 | try: |
|
1979 | 1979 | def gettransaction(): |
|
1980 | 1980 | if not lockandtr[2]: |
|
1981 | 1981 | lockandtr[0] = repo.wlock() |
|
1982 | 1982 | lockandtr[1] = repo.lock() |
|
1983 | 1983 | lockandtr[2] = repo.transaction(source) |
|
1984 | 1984 | lockandtr[2].hookargs['source'] = source |
|
1985 | 1985 | lockandtr[2].hookargs['url'] = url |
|
1986 | 1986 | lockandtr[2].hookargs['bundle2'] = '1' |
|
1987 | 1987 | return lockandtr[2] |
|
1988 | 1988 | |
|
1989 | 1989 | # Do greedy locking by default until we're satisfied with lazy |
|
1990 | 1990 | # locking. |
|
1991 | 1991 | if not repo.ui.configbool('experimental', 'bundle2lazylocking'): |
|
1992 | 1992 | gettransaction() |
|
1993 | 1993 | |
|
1994 | 1994 | op = bundle2.bundleoperation(repo, gettransaction, |
|
1995 | 1995 | captureoutput=captureoutput) |
|
1996 | 1996 | try: |
|
1997 | 1997 | op = bundle2.processbundle(repo, cg, op=op) |
|
1998 | 1998 | finally: |
|
1999 | 1999 | r = op.reply |
|
2000 | 2000 | if captureoutput and r is not None: |
|
2001 | 2001 | repo.ui.pushbuffer(error=True, subproc=True) |
|
2002 | 2002 | def recordout(output): |
|
2003 | 2003 | r.newpart('output', data=output, mandatory=False) |
|
2004 | 2004 | if lockandtr[2] is not None: |
|
2005 | 2005 | lockandtr[2].close() |
|
2006 | 2006 | except BaseException as exc: |
|
2007 | 2007 | exc.duringunbundle2 = True |
|
2008 | 2008 | if captureoutput and r is not None: |
|
2009 | 2009 | parts = exc._bundle2salvagedoutput = r.salvageoutput() |
|
2010 | 2010 | def recordout(output): |
|
2011 | 2011 | part = bundle2.bundlepart('output', data=output, |
|
2012 | 2012 | mandatory=False) |
|
2013 | 2013 | parts.append(part) |
|
2014 | 2014 | raise |
|
2015 | 2015 | finally: |
|
2016 | 2016 | lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0]) |
|
2017 | 2017 | if recordout is not None: |
|
2018 | 2018 | recordout(repo.ui.popbuffer()) |
|
2019 | 2019 | return r |
|
2020 | 2020 | |
|
2021 | 2021 | def _maybeapplyclonebundle(pullop): |
|
2022 | 2022 | """Apply a clone bundle from a remote, if possible.""" |
|
2023 | 2023 | |
|
2024 | 2024 | repo = pullop.repo |
|
2025 | 2025 | remote = pullop.remote |
|
2026 | 2026 | |
|
2027 | 2027 | if not repo.ui.configbool('ui', 'clonebundles'): |
|
2028 | 2028 | return |
|
2029 | 2029 | |
|
2030 | 2030 | # Only run if local repo is empty. |
|
2031 | 2031 | if len(repo): |
|
2032 | 2032 | return |
|
2033 | 2033 | |
|
2034 | 2034 | if pullop.heads: |
|
2035 | 2035 | return |
|
2036 | 2036 | |
|
2037 | 2037 | if not remote.capable('clonebundles'): |
|
2038 | 2038 | return |
|
2039 | 2039 | |
|
2040 | 2040 | res = remote._call('clonebundles') |
|
2041 | 2041 | |
|
2042 | 2042 | # If we call the wire protocol command, that's good enough to record the |
|
2043 | 2043 | # attempt. |
|
2044 | 2044 | pullop.clonebundleattempted = True |
|
2045 | 2045 | |
|
2046 | 2046 | entries = parseclonebundlesmanifest(repo, res) |
|
2047 | 2047 | if not entries: |
|
2048 | 2048 | repo.ui.note(_('no clone bundles available on remote; ' |
|
2049 | 2049 | 'falling back to regular clone\n')) |
|
2050 | 2050 | return |
|
2051 | 2051 | |
|
2052 | 2052 | entries = filterclonebundleentries( |
|
2053 | 2053 | repo, entries, streamclonerequested=pullop.streamclonerequested) |
|
2054 | 2054 | |
|
2055 | 2055 | if not entries: |
|
2056 | 2056 | # There is a thundering herd concern here. However, if a server |
|
2057 | 2057 | # operator doesn't advertise bundles appropriate for its clients, |
|
2058 | 2058 | # they deserve what's coming. Furthermore, from a client's |
|
2059 | 2059 | # perspective, no automatic fallback would mean not being able to |
|
2060 | 2060 | # clone! |
|
2061 | 2061 | repo.ui.warn(_('no compatible clone bundles available on server; ' |
|
2062 | 2062 | 'falling back to regular clone\n')) |
|
2063 | 2063 | repo.ui.warn(_('(you may want to report this to the server ' |
|
2064 | 2064 | 'operator)\n')) |
|
2065 | 2065 | return |
|
2066 | 2066 | |
|
2067 | 2067 | entries = sortclonebundleentries(repo.ui, entries) |
|
2068 | 2068 | |
|
2069 | 2069 | url = entries[0]['URL'] |
|
2070 | 2070 | repo.ui.status(_('applying clone bundle from %s\n') % url) |
|
2071 | 2071 | if trypullbundlefromurl(repo.ui, repo, url): |
|
2072 | 2072 | repo.ui.status(_('finished applying clone bundle\n')) |
|
2073 | 2073 | # Bundle failed. |
|
2074 | 2074 | # |
|
2075 | 2075 | # We abort by default to avoid the thundering herd of |
|
2076 | 2076 | # clients flooding a server that was expecting expensive |
|
2077 | 2077 | # clone load to be offloaded. |
|
2078 | 2078 | elif repo.ui.configbool('ui', 'clonebundlefallback'): |
|
2079 | 2079 | repo.ui.warn(_('falling back to normal clone\n')) |
|
2080 | 2080 | else: |
|
2081 | 2081 | raise error.Abort(_('error applying bundle'), |
|
2082 | 2082 | hint=_('if this error persists, consider contacting ' |
|
2083 | 2083 | 'the server operator or disable clone ' |
|
2084 | 2084 | 'bundles via ' |
|
2085 | 2085 | '"--config ui.clonebundles=false"')) |
|
2086 | 2086 | |
|
2087 | 2087 | def parseclonebundlesmanifest(repo, s): |
|
2088 | 2088 | """Parses the raw text of a clone bundles manifest. |
|
2089 | 2089 | |
|
2090 | 2090 | Returns a list of dicts. The dicts have a ``URL`` key corresponding |
|
2091 | 2091 | to the URL; the other keys are the attributes for the entry.
|
2092 | 2092 | """ |
|
2093 | 2093 | m = [] |
|
2094 | 2094 | for line in s.splitlines(): |
|
2095 | 2095 | fields = line.split() |
|
2096 | 2096 | if not fields: |
|
2097 | 2097 | continue |
|
2098 | 2098 | attrs = {'URL': fields[0]} |
|
2099 | 2099 | for rawattr in fields[1:]: |
|
2100 | 2100 | key, value = rawattr.split('=', 1) |
|
2101 | 2101 | key = urlreq.unquote(key) |
|
2102 | 2102 | value = urlreq.unquote(value) |
|
2103 | 2103 | attrs[key] = value |
|
2104 | 2104 | |
|
2105 | 2105 | # Parse BUNDLESPEC into components. This makes client-side |
|
2106 | 2106 | # preferences easier to specify since you can prefer a single |
|
2107 | 2107 | # component of the BUNDLESPEC. |
|
2108 | 2108 | if key == 'BUNDLESPEC': |
|
2109 | 2109 | try: |
|
2110 | 2110 | comp, version, params = parsebundlespec(repo, value, |
|
2111 | 2111 | externalnames=True) |
|
2112 | 2112 | attrs['COMPRESSION'] = comp |
|
2113 | 2113 | attrs['VERSION'] = version |
|
2114 | 2114 | except error.InvalidBundleSpecification: |
|
2115 | 2115 | pass |
|
2116 | 2116 | except error.UnsupportedBundleSpecification: |
|
2117 | 2117 | pass |
|
2118 | 2118 | |
|
2119 | 2119 | m.append(attrs) |
|
2120 | 2120 | |
|
2121 | 2121 | return m |
|
2122 | 2122 | |
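
An illustrative manifest line and its parsed result; the URL and attribute
values are invented:

# input line:
#   https://hg.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
# parsed entry:
#   {'URL': 'https://hg.example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#    'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}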
|
2123 | 2123 | def filterclonebundleentries(repo, entries, streamclonerequested=False): |
|
2124 | 2124 | """Remove incompatible clone bundle manifest entries. |
|
2125 | 2125 | |
|
2126 | 2126 | Accepts a list of entries parsed with ``parseclonebundlesmanifest`` |
|
2127 | 2127 | and returns a new list consisting of only the entries that this client |
|
2128 | 2128 | should be able to apply. |
|
2129 | 2129 | |
|
2130 | 2130 | There is no guarantee we'll be able to apply all returned entries because |
|
2131 | 2131 | the metadata we use to filter on may be missing or wrong. |
|
2132 | 2132 | """ |
|
2133 | 2133 | newentries = [] |
|
2134 | 2134 | for entry in entries: |
|
2135 | 2135 | spec = entry.get('BUNDLESPEC') |
|
2136 | 2136 | if spec: |
|
2137 | 2137 | try: |
|
2138 | 2138 | comp, version, params = parsebundlespec(repo, spec, strict=True) |
|
2139 | 2139 | |
|
2140 | 2140 | # If a stream clone was requested, filter out non-streamclone |
|
2141 | 2141 | # entries. |
|
2142 | 2142 | if streamclonerequested and (comp != 'UN' or version != 's1'): |
|
2143 | 2143 | repo.ui.debug('filtering %s because not a stream clone\n' % |
|
2144 | 2144 | entry['URL']) |
|
2145 | 2145 | continue |
|
2146 | 2146 | |
|
2147 | 2147 | except error.InvalidBundleSpecification as e: |
|
2148 | 2148 | repo.ui.debug(str(e) + '\n') |
|
2149 | 2149 | continue |
|
2150 | 2150 | except error.UnsupportedBundleSpecification as e: |
|
2151 | 2151 | repo.ui.debug('filtering %s because unsupported bundle ' |
|
2152 |      | 'spec: %s\n' % (entry['URL'], str(e)))

     | 2152 | 'spec: %s\n' % (

     | 2153 | entry['URL'], util.forcebytestr(e)))
|
2153 | 2154 | continue |
|
2154 | 2155 | # If we don't have a spec and requested a stream clone, we don't know |
|
2155 | 2156 | # what the entry is so don't attempt to apply it. |
|
2156 | 2157 | elif streamclonerequested: |
|
2157 | 2158 | repo.ui.debug('filtering %s because cannot determine if a stream ' |
|
2158 | 2159 | 'clone bundle\n' % entry['URL']) |
|
2159 | 2160 | continue |
|
2160 | 2161 | |
|
2161 | 2162 | if 'REQUIRESNI' in entry and not sslutil.hassni: |
|
2162 | 2163 | repo.ui.debug('filtering %s because SNI not supported\n' % |
|
2163 | 2164 | entry['URL']) |
|
2164 | 2165 | continue |
|
2165 | 2166 | |
|
2166 | 2167 | newentries.append(entry) |
|
2167 | 2168 | |
|
2168 | 2169 | return newentries |
|
2169 | 2170 | |
|
2170 | 2171 | class clonebundleentry(object): |
|
2171 | 2172 | """Represents an item in a clone bundles manifest. |
|
2172 | 2173 | |
|
2173 | 2174 | This rich class is needed to support sorting since sorted() in Python 3 |
|
2174 | 2175 | doesn't support ``cmp`` and our comparison is complex enough that ``key=`` |
|
2175 | 2176 | won't work. |
|
2176 | 2177 | """ |
|
2177 | 2178 | |
|
2178 | 2179 | def __init__(self, value, prefers): |
|
2179 | 2180 | self.value = value |
|
2180 | 2181 | self.prefers = prefers |
|
2181 | 2182 | |
|
2182 | 2183 | def _cmp(self, other): |
|
2183 | 2184 | for prefkey, prefvalue in self.prefers: |
|
2184 | 2185 | avalue = self.value.get(prefkey) |
|
2185 | 2186 | bvalue = other.value.get(prefkey) |
|
2186 | 2187 | |
|
2187 | 2188 | # Special case for b missing attribute and a matches exactly. |
|
2188 | 2189 | if avalue is not None and bvalue is None and avalue == prefvalue: |
|
2189 | 2190 | return -1 |
|
2190 | 2191 | |
|
2191 | 2192 | # Special case for a missing attribute and b matches exactly. |
|
2192 | 2193 | if bvalue is not None and avalue is None and bvalue == prefvalue: |
|
2193 | 2194 | return 1 |
|
2194 | 2195 | |
|
2195 | 2196 | # We can't compare unless attribute present on both. |
|
2196 | 2197 | if avalue is None or bvalue is None: |
|
2197 | 2198 | continue |
|
2198 | 2199 | |
|
2199 | 2200 | # Same values should fall back to next attribute. |
|
2200 | 2201 | if avalue == bvalue: |
|
2201 | 2202 | continue |
|
2202 | 2203 | |
|
2203 | 2204 | # Exact matches come first. |
|
2204 | 2205 | if avalue == prefvalue: |
|
2205 | 2206 | return -1 |
|
2206 | 2207 | if bvalue == prefvalue: |
|
2207 | 2208 | return 1 |
|
2208 | 2209 | |
|
2209 | 2210 | # Fall back to next attribute. |
|
2210 | 2211 | continue |
|
2211 | 2212 | |
|
2212 | 2213 | # If we got here we couldn't sort by attributes and prefers. Fall |
|
2213 | 2214 | # back to index order. |
|
2214 | 2215 | return 0 |
|
2215 | 2216 | |
|
2216 | 2217 | def __lt__(self, other): |
|
2217 | 2218 | return self._cmp(other) < 0 |
|
2218 | 2219 | |
|
2219 | 2220 | def __gt__(self, other): |
|
2220 | 2221 | return self._cmp(other) > 0 |
|
2221 | 2222 | |
|
2222 | 2223 | def __eq__(self, other): |
|
2223 | 2224 | return self._cmp(other) == 0 |
|
2224 | 2225 | |
|
2225 | 2226 | def __le__(self, other): |
|
2226 | 2227 | return self._cmp(other) <= 0 |
|
2227 | 2228 | |
|
2228 | 2229 | def __ge__(self, other): |
|
2229 | 2230 | return self._cmp(other) >= 0 |
|
2230 | 2231 | |
|
2231 | 2232 | def __ne__(self, other): |
|
2232 | 2233 | return self._cmp(other) != 0 |
|
2233 | 2234 | |
|
2234 | 2235 | def sortclonebundleentries(ui, entries): |
|
2235 | 2236 | prefers = ui.configlist('ui', 'clonebundleprefers') |
|
2236 | 2237 | if not prefers: |
|
2237 | 2238 | return list(entries) |
|
2238 | 2239 | |
|
2239 | 2240 | prefers = [p.split('=', 1) for p in prefers] |
|
2240 | 2241 | |
|
2241 | 2242 | items = sorted(clonebundleentry(v, prefers) for v in entries) |
|
2242 | 2243 | return [i.value for i in items] |
|
2243 | 2244 | |
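
For example, with a hypothetical ui.clonebundleprefers=COMPRESSION=zstd,VERSION=v2
setting, an entry advertising zstd sorts ahead of a gzip one; VERSION breaks
ties and manifest order decides the rest:

prefers = [['COMPRESSION', 'zstd'], ['VERSION', 'v2']]
entries = [{'URL': 'a.hg', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
           {'URL': 'b.hg', 'COMPRESSION': 'zstd', 'VERSION': 'v2'}]
# sorted(clonebundleentry(v, prefers) for v in entries) ranks 'b.hg' first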
|
2244 | 2245 | def trypullbundlefromurl(ui, repo, url): |
|
2245 | 2246 | """Attempt to apply a bundle from a URL.""" |
|
2246 | 2247 | with repo.lock(), repo.transaction('bundleurl') as tr: |
|
2247 | 2248 | try: |
|
2248 | 2249 | fh = urlmod.open(ui, url) |
|
2249 | 2250 | cg = readbundle(ui, fh, 'stream') |
|
2250 | 2251 | |
|
2251 | 2252 | if isinstance(cg, streamclone.streamcloneapplier): |
|
2252 | 2253 | cg.apply(repo) |
|
2253 | 2254 | else: |
|
2254 | 2255 | bundle2.applybundle(repo, cg, tr, 'clonebundles', url) |
|
2255 | 2256 | return True |
|
2256 | 2257 | except urlerr.httperror as e: |
|
2257 |      | ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))

     | 2258 | ui.warn(_('HTTP error fetching bundle: %s\n') %

     | 2259 | util.forcebytestr(e))
|
2258 | 2260 | except urlerr.urlerror as e: |
|
2259 | 2261 | ui.warn(_('error fetching bundle: %s\n') % e.reason) |
|
2260 | 2262 | |
|
2261 | 2263 | return False |
@@ -1,2276 +1,2277 b'' | |||
|
1 | 1 | # localrepo.py - read/write repository class for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import hashlib |
|
12 | 12 | import os |
|
13 | 13 | import random |
|
14 | 14 | import time |
|
15 | 15 | import weakref |
|
16 | 16 | |
|
17 | 17 | from .i18n import _ |
|
18 | 18 | from .node import ( |
|
19 | 19 | hex, |
|
20 | 20 | nullid, |
|
21 | 21 | short, |
|
22 | 22 | ) |
|
23 | 23 | from . import ( |
|
24 | 24 | bookmarks, |
|
25 | 25 | branchmap, |
|
26 | 26 | bundle2, |
|
27 | 27 | changegroup, |
|
28 | 28 | changelog, |
|
29 | 29 | color, |
|
30 | 30 | context, |
|
31 | 31 | dirstate, |
|
32 | 32 | dirstateguard, |
|
33 | 33 | discovery, |
|
34 | 34 | encoding, |
|
35 | 35 | error, |
|
36 | 36 | exchange, |
|
37 | 37 | extensions, |
|
38 | 38 | filelog, |
|
39 | 39 | hook, |
|
40 | 40 | lock as lockmod, |
|
41 | 41 | manifest, |
|
42 | 42 | match as matchmod, |
|
43 | 43 | merge as mergemod, |
|
44 | 44 | mergeutil, |
|
45 | 45 | namespaces, |
|
46 | 46 | obsolete, |
|
47 | 47 | pathutil, |
|
48 | 48 | peer, |
|
49 | 49 | phases, |
|
50 | 50 | pushkey, |
|
51 | 51 | pycompat, |
|
52 | 52 | repository, |
|
53 | 53 | repoview, |
|
54 | 54 | revset, |
|
55 | 55 | revsetlang, |
|
56 | 56 | scmutil, |
|
57 | 57 | sparse, |
|
58 | 58 | store, |
|
59 | 59 | subrepoutil, |
|
60 | 60 | tags as tagsmod, |
|
61 | 61 | transaction, |
|
62 | 62 | txnutil, |
|
63 | 63 | util, |
|
64 | 64 | vfs as vfsmod, |
|
65 | 65 | ) |
|
66 | 66 | |
|
67 | 67 | release = lockmod.release |
|
68 | 68 | urlerr = util.urlerr |
|
69 | 69 | urlreq = util.urlreq |
|
70 | 70 | |
|
71 | 71 | # set of (path, vfs-location) tuples. vfs-location is: |
|
72 | 72 | # - 'plain' for vfs relative paths
|
73 | 73 | # - '' for svfs relative paths |
|
74 | 74 | _cachedfiles = set() |
|
75 | 75 | |
|
76 | 76 | class _basefilecache(scmutil.filecache): |
|
77 | 77 | """All filecache usage on repo is done for logic that should be unfiltered
|
78 | 78 | """ |
|
79 | 79 | def __get__(self, repo, type=None): |
|
80 | 80 | if repo is None: |
|
81 | 81 | return self |
|
82 | 82 | return super(_basefilecache, self).__get__(repo.unfiltered(), type) |
|
83 | 83 | def __set__(self, repo, value): |
|
84 | 84 | return super(_basefilecache, self).__set__(repo.unfiltered(), value) |
|
85 | 85 | def __delete__(self, repo): |
|
86 | 86 | return super(_basefilecache, self).__delete__(repo.unfiltered()) |
|
87 | 87 | |
|
88 | 88 | class repofilecache(_basefilecache): |
|
89 | 89 | """filecache for files in .hg but outside of .hg/store""" |
|
90 | 90 | def __init__(self, *paths): |
|
91 | 91 | super(repofilecache, self).__init__(*paths) |
|
92 | 92 | for path in paths: |
|
93 | 93 | _cachedfiles.add((path, 'plain')) |
|
94 | 94 | |
|
95 | 95 | def join(self, obj, fname): |
|
96 | 96 | return obj.vfs.join(fname) |
|
97 | 97 | |
|
98 | 98 | class storecache(_basefilecache): |
|
99 | 99 | """filecache for files in the store""" |
|
100 | 100 | def __init__(self, *paths): |
|
101 | 101 | super(storecache, self).__init__(*paths) |
|
102 | 102 | for path in paths: |
|
103 | 103 | _cachedfiles.add((path, '')) |
|
104 | 104 | |
|
105 | 105 | def join(self, obj, fname): |
|
106 | 106 | return obj.sjoin(fname) |
|
107 | 107 | |
|
108 | 108 | def isfilecached(repo, name): |
|
109 | 109 | """check if a repo has already cached the "name" filecache-ed property
|
110 | 110 | |
|
111 | 111 | This returns (cachedobj-or-None, iscached) tuple. |
|
112 | 112 | """ |
|
113 | 113 | cacheentry = repo.unfiltered()._filecache.get(name, None) |
|
114 | 114 | if not cacheentry: |
|
115 | 115 | return None, False |
|
116 | 116 | return cacheentry.obj, True |
|
117 | 117 | |
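
A usage sketch: peek at the '_bookmarks' filecache-ed property (one real
example of such a property) without forcing it to be computed:

obj, cached = isfilecached(repo, '_bookmarks')
if not cached:
    pass  # the property has not been loaded yet; obj is None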
|
118 | 118 | class unfilteredpropertycache(util.propertycache): |
|
119 | 119 | """propertycache that applies to the unfiltered repo only"""
|
120 | 120 | |
|
121 | 121 | def __get__(self, repo, type=None): |
|
122 | 122 | unfi = repo.unfiltered() |
|
123 | 123 | if unfi is repo: |
|
124 | 124 | return super(unfilteredpropertycache, self).__get__(unfi) |
|
125 | 125 | return getattr(unfi, self.name) |
|
126 | 126 | |
|
127 | 127 | class filteredpropertycache(util.propertycache): |
|
128 | 128 | """propertycache that must take filtering into account"""
|
129 | 129 | |
|
130 | 130 | def cachevalue(self, obj, value): |
|
131 | 131 | object.__setattr__(obj, self.name, value) |
|
132 | 132 | |
|
133 | 133 | |
|
134 | 134 | def hasunfilteredcache(repo, name): |
|
135 | 135 | """check if a repo has an unfilteredpropertycache value for <name>""" |
|
136 | 136 | return name in vars(repo.unfiltered()) |
|
137 | 137 | |
|
138 | 138 | def unfilteredmethod(orig): |
|
139 | 139 | """decorate a method that always needs to be run on the unfiltered version"""
|
140 | 140 | def wrapper(repo, *args, **kwargs): |
|
141 | 141 | return orig(repo.unfiltered(), *args, **kwargs) |
|
142 | 142 | return wrapper |
|
143 | 143 | |
|
144 | 144 | moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle', |
|
145 | 145 | 'unbundle'} |
|
146 | 146 | legacycaps = moderncaps.union({'changegroupsubset'}) |
|
147 | 147 | |
|
148 | 148 | class localpeer(repository.peer): |
|
149 | 149 | '''peer for a local repo; reflects only the most recent API''' |
|
150 | 150 | |
|
151 | 151 | def __init__(self, repo, caps=None): |
|
152 | 152 | super(localpeer, self).__init__() |
|
153 | 153 | |
|
154 | 154 | if caps is None: |
|
155 | 155 | caps = moderncaps.copy() |
|
156 | 156 | self._repo = repo.filtered('served') |
|
157 | 157 | self._ui = repo.ui |
|
158 | 158 | self._caps = repo._restrictcapabilities(caps) |
|
159 | 159 | |
|
160 | 160 | # Begin of _basepeer interface. |
|
161 | 161 | |
|
162 | 162 | @util.propertycache |
|
163 | 163 | def ui(self): |
|
164 | 164 | return self._ui |
|
165 | 165 | |
|
166 | 166 | def url(self): |
|
167 | 167 | return self._repo.url() |
|
168 | 168 | |
|
169 | 169 | def local(self): |
|
170 | 170 | return self._repo |
|
171 | 171 | |
|
172 | 172 | def peer(self): |
|
173 | 173 | return self |
|
174 | 174 | |
|
175 | 175 | def canpush(self): |
|
176 | 176 | return True |
|
177 | 177 | |
|
178 | 178 | def close(self): |
|
179 | 179 | self._repo.close() |
|
180 | 180 | |
|
181 | 181 | # End of _basepeer interface. |
|
182 | 182 | |
|
183 | 183 | # Begin of _basewirecommands interface. |
|
184 | 184 | |
|
185 | 185 | def branchmap(self): |
|
186 | 186 | return self._repo.branchmap() |
|
187 | 187 | |
|
188 | 188 | def capabilities(self): |
|
189 | 189 | return self._caps |
|
190 | 190 | |
|
191 | 191 | def debugwireargs(self, one, two, three=None, four=None, five=None): |
|
192 | 192 | """Used to test argument passing over the wire""" |
|
193 | 193 | return "%s %s %s %s %s" % (one, two, three, four, five) |
|
194 | 194 | |
|
195 | 195 | def getbundle(self, source, heads=None, common=None, bundlecaps=None, |
|
196 | 196 | **kwargs): |
|
197 | 197 | chunks = exchange.getbundlechunks(self._repo, source, heads=heads, |
|
198 | 198 | common=common, bundlecaps=bundlecaps, |
|
199 | 199 | **kwargs)[1] |
|
200 | 200 | cb = util.chunkbuffer(chunks) |
|
201 | 201 | |
|
202 | 202 | if exchange.bundle2requested(bundlecaps): |
|
203 | 203 | # When requesting a bundle2, getbundle returns a stream to make the |
|
204 | 204 | # wire level function happier. We need to build a proper object |
|
205 | 205 | # from it in local peer. |
|
206 | 206 | return bundle2.getunbundler(self.ui, cb) |
|
207 | 207 | else: |
|
208 | 208 | return changegroup.getunbundler('01', cb, None) |
|
209 | 209 | |
|
210 | 210 | def heads(self): |
|
211 | 211 | return self._repo.heads() |
|
212 | 212 | |
|
213 | 213 | def known(self, nodes): |
|
214 | 214 | return self._repo.known(nodes) |
|
215 | 215 | |
|
216 | 216 | def listkeys(self, namespace): |
|
217 | 217 | return self._repo.listkeys(namespace) |
|
218 | 218 | |
|
219 | 219 | def lookup(self, key): |
|
220 | 220 | return self._repo.lookup(key) |
|
221 | 221 | |
|
222 | 222 | def pushkey(self, namespace, key, old, new): |
|
223 | 223 | return self._repo.pushkey(namespace, key, old, new) |
|
224 | 224 | |
|
225 | 225 | def stream_out(self): |
|
226 | 226 | raise error.Abort(_('cannot perform stream clone against local ' |
|
227 | 227 | 'peer')) |
|
228 | 228 | |
|
229 | 229 | def unbundle(self, cg, heads, url): |
|
230 | 230 | """apply a bundle on a repo |
|
231 | 231 | |
|
232 | 232 | This function handles the repo locking itself.""" |
|
233 | 233 | try: |
|
234 | 234 | try: |
|
235 | 235 | cg = exchange.readbundle(self.ui, cg, None) |
|
236 | 236 | ret = exchange.unbundle(self._repo, cg, heads, 'push', url) |
|
237 | 237 | if util.safehasattr(ret, 'getchunks'): |
|
238 | 238 | # This is a bundle20 object, turn it into an unbundler. |
|
239 | 239 | # This little dance should be dropped eventually when the |
|
240 | 240 | # API is finally improved. |
|
241 | 241 | stream = util.chunkbuffer(ret.getchunks()) |
|
242 | 242 | ret = bundle2.getunbundler(self.ui, stream) |
|
243 | 243 | return ret |
|
244 | 244 | except Exception as exc: |
|
245 | 245 | # If the exception contains output salvaged from a bundle2 |
|
246 | 246 | # reply, we need to make sure it is printed before continuing |
|
247 | 247 | # to fail. So we build a bundle2 with such output and consume |
|
248 | 248 | # it directly. |
|
249 | 249 | # |
|
250 | 250 | # This is not very elegant but allows a "simple" solution for |
|
251 | 251 | # issue4594 |
|
252 | 252 | output = getattr(exc, '_bundle2salvagedoutput', ()) |
|
253 | 253 | if output: |
|
254 | 254 | bundler = bundle2.bundle20(self._repo.ui) |
|
255 | 255 | for out in output: |
|
256 | 256 | bundler.addpart(out) |
|
257 | 257 | stream = util.chunkbuffer(bundler.getchunks()) |
|
258 | 258 | b = bundle2.getunbundler(self.ui, stream) |
|
259 | 259 | bundle2.processbundle(self._repo, b) |
|
260 | 260 | raise |
|
261 | 261 | except error.PushRaced as exc: |
|
262 |     | raise error.ResponseError(_('push failed:'), str(exc))

    | 262 | raise error.ResponseError(_('push failed:'),

    | 263 | util.forcebytestr(exc))
|
263 | 264 | |
|
264 | 265 | # End of _basewirecommands interface. |
|
265 | 266 | |
|
266 | 267 | # Begin of peer interface. |
|
267 | 268 | |
|
268 | 269 | def iterbatch(self): |
|
269 | 270 | return peer.localiterbatcher(self) |
|
270 | 271 | |
|
271 | 272 | # End of peer interface. |
|
272 | 273 | |
|
273 | 274 | class locallegacypeer(repository.legacypeer, localpeer): |
|
274 | 275 | '''peer extension which implements legacy methods too; used for tests with |
|
275 | 276 | restricted capabilities''' |
|
276 | 277 | |
|
277 | 278 | def __init__(self, repo): |
|
278 | 279 | super(locallegacypeer, self).__init__(repo, caps=legacycaps) |
|
279 | 280 | |
|
280 | 281 | # Begin of baselegacywirecommands interface. |
|
281 | 282 | |
|
282 | 283 | def between(self, pairs): |
|
283 | 284 | return self._repo.between(pairs) |
|
284 | 285 | |
|
285 | 286 | def branches(self, nodes): |
|
286 | 287 | return self._repo.branches(nodes) |
|
287 | 288 | |
|
288 | 289 | def changegroup(self, basenodes, source): |
|
289 | 290 | outgoing = discovery.outgoing(self._repo, missingroots=basenodes, |
|
290 | 291 | missingheads=self._repo.heads()) |
|
291 | 292 | return changegroup.makechangegroup(self._repo, outgoing, '01', source) |
|
292 | 293 | |
|
293 | 294 | def changegroupsubset(self, bases, heads, source): |
|
294 | 295 | outgoing = discovery.outgoing(self._repo, missingroots=bases, |
|
295 | 296 | missingheads=heads) |
|
296 | 297 | return changegroup.makechangegroup(self._repo, outgoing, '01', source) |
|
297 | 298 | |
|
298 | 299 | # End of baselegacywirecommands interface. |
|
299 | 300 | |
|
300 | 301 | # Increment the sub-version when the revlog v2 format changes to lock out old |
|
301 | 302 | # clients. |
|
302 | 303 | REVLOGV2_REQUIREMENT = 'exp-revlogv2.0' |
|
303 | 304 | |
|
304 | 305 | class localrepository(object): |
|
305 | 306 | |
|
306 | 307 | # obsolete experimental requirements: |
|
307 | 308 | # - manifestv2: An experimental new manifest format that allowed |
|
308 | 309 | # for stem compression of long paths. Experiment ended up not |
|
309 | 310 | # being successful (repository sizes went up due to worse delta |
|
310 | 311 | # chains), and the code was deleted in 4.6. |
|
311 | 312 | supportedformats = { |
|
312 | 313 | 'revlogv1', |
|
313 | 314 | 'generaldelta', |
|
314 | 315 | 'treemanifest', |
|
315 | 316 | REVLOGV2_REQUIREMENT, |
|
316 | 317 | } |
|
317 | 318 | _basesupported = supportedformats | { |
|
318 | 319 | 'store', |
|
319 | 320 | 'fncache', |
|
320 | 321 | 'shared', |
|
321 | 322 | 'relshared', |
|
322 | 323 | 'dotencode', |
|
323 | 324 | 'exp-sparse', |
|
324 | 325 | } |
|
325 | 326 | openerreqs = { |
|
326 | 327 | 'revlogv1', |
|
327 | 328 | 'generaldelta', |
|
328 | 329 | 'treemanifest', |
|
329 | 330 | } |
|
330 | 331 | |
|
331 | 332 | # a list of (ui, featureset) functions. |
|
332 | 333 | # only functions defined in module of enabled extensions are invoked |
|
333 | 334 | featuresetupfuncs = set() |
|
334 | 335 | |
|
335 | 336 | # list of prefixes for files which can be written without 'wlock'
|
336 | 337 | # Extensions should extend this list when needed |
|
337 | 338 | _wlockfreeprefix = { |
|
338 | 339 | # We might consider requiring 'wlock' for the next

339 | 340 | # two, but pretty much all the existing code assumes
|
340 | 341 | # wlock is not needed so we keep them excluded for |
|
341 | 342 | # now. |
|
342 | 343 | 'hgrc', |
|
343 | 344 | 'requires', |
|
344 | 345 | # XXX cache is a complicated business; someone
|
345 | 346 | # should investigate this in depth at some point |
|
346 | 347 | 'cache/', |
|
347 | 348 | # XXX shouldn't be dirstate covered by the wlock? |
|
348 | 349 | 'dirstate', |
|
349 | 350 | # XXX bisect was still a bit too messy at the time |
|
350 | 351 | # this changeset was introduced. Someone should fix |
|
351 | 352 | # the remaining bit and drop this line
|
352 | 353 | 'bisect.state', |
|
353 | 354 | } |
|
354 | 355 | |
|
355 | 356 | def __init__(self, baseui, path, create=False): |
|
356 | 357 | self.requirements = set() |
|
357 | 358 | self.filtername = None |
|
358 | 359 | # wvfs: rooted at the repository root, used to access the working copy |
|
359 | 360 | self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True) |
|
360 | 361 | # vfs: rooted at .hg, used to access repo files outside of .hg/store |
|
361 | 362 | self.vfs = None |
|
362 | 363 | # svfs: usually rooted at .hg/store, used to access repository history |
|
363 | 364 | # If this is a shared repository, this vfs may point to another |
|
364 | 365 | # repository's .hg/store directory. |
|
365 | 366 | self.svfs = None |
|
366 | 367 | self.root = self.wvfs.base |
|
367 | 368 | self.path = self.wvfs.join(".hg") |
|
368 | 369 | self.origroot = path |
|
369 | 370 | # This is only used by context.workingctx.match in order to |
|
370 | 371 | # detect files in subrepos. |
|
371 | 372 | self.auditor = pathutil.pathauditor( |
|
372 | 373 | self.root, callback=self._checknested) |
|
373 | 374 | # This is only used by context.basectx.match in order to detect |
|
374 | 375 | # files in subrepos. |
|
375 | 376 | self.nofsauditor = pathutil.pathauditor( |
|
376 | 377 | self.root, callback=self._checknested, realfs=False, cached=True) |
|
377 | 378 | self.baseui = baseui |
|
378 | 379 | self.ui = baseui.copy() |
|
379 | 380 | self.ui.copy = baseui.copy # prevent copying repo configuration |
|
380 | 381 | self.vfs = vfsmod.vfs(self.path, cacheaudited=True) |
|
381 | 382 | if (self.ui.configbool('devel', 'all-warnings') or |
|
382 | 383 | self.ui.configbool('devel', 'check-locks')): |
|
383 | 384 | self.vfs.audit = self._getvfsward(self.vfs.audit) |
|
384 | 385 | # A list of callbacks to shape the phase if no data were found.

385 | 386 | # Callbacks are in the form: func(repo, roots) --> processed root.

386 | 387 | # This list is to be filled by extensions during repo setup
|
387 | 388 | self._phasedefaults = [] |
|
388 | 389 | try: |
|
389 | 390 | self.ui.readconfig(self.vfs.join("hgrc"), self.root) |
|
390 | 391 | self._loadextensions() |
|
391 | 392 | except IOError: |
|
392 | 393 | pass |
|
393 | 394 | |
|
394 | 395 | if self.featuresetupfuncs: |
|
395 | 396 | self.supported = set(self._basesupported) # use private copy |
|
396 | 397 | extmods = set(m.__name__ for n, m |
|
397 | 398 | in extensions.extensions(self.ui)) |
|
398 | 399 | for setupfunc in self.featuresetupfuncs: |
|
399 | 400 | if setupfunc.__module__ in extmods: |
|
400 | 401 | setupfunc(self.ui, self.supported) |
|
401 | 402 | else: |
|
402 | 403 | self.supported = self._basesupported |
|
403 | 404 | color.setup(self.ui) |
|
404 | 405 | |
|
405 | 406 | # Add compression engines. |
|
406 | 407 | for name in util.compengines: |
|
407 | 408 | engine = util.compengines[name] |
|
408 | 409 | if engine.revlogheader(): |
|
409 | 410 | self.supported.add('exp-compression-%s' % name) |
|
410 | 411 | |
|
411 | 412 | if not self.vfs.isdir(): |
|
412 | 413 | if create: |
|
413 | 414 | self.requirements = newreporequirements(self) |
|
414 | 415 | |
|
415 | 416 | if not self.wvfs.exists(): |
|
416 | 417 | self.wvfs.makedirs() |
|
417 | 418 | self.vfs.makedir(notindexed=True) |
|
418 | 419 | |
|
419 | 420 | if 'store' in self.requirements: |
|
420 | 421 | self.vfs.mkdir("store") |
|
421 | 422 | |
|
422 | 423 | # create an invalid changelog |
|
423 | 424 | self.vfs.append( |
|
424 | 425 | "00changelog.i", |
|
425 | 426 | '\0\0\0\2' # represents revlogv2 |
|
426 | 427 | ' dummy changelog to prevent using the old repo layout' |
|
427 | 428 | ) |
|
428 | 429 | else: |
|
429 | 430 | raise error.RepoError(_("repository %s not found") % path) |
|
430 | 431 | elif create: |
|
431 | 432 | raise error.RepoError(_("repository %s already exists") % path) |
|
432 | 433 | else: |
|
433 | 434 | try: |
|
434 | 435 | self.requirements = scmutil.readrequires( |
|
435 | 436 | self.vfs, self.supported) |
|
436 | 437 | except IOError as inst: |
|
437 | 438 | if inst.errno != errno.ENOENT: |
|
438 | 439 | raise |
|
439 | 440 | |
|
440 | 441 | cachepath = self.vfs.join('cache') |
|
441 | 442 | self.sharedpath = self.path |
|
442 | 443 | try: |
|
443 | 444 | sharedpath = self.vfs.read("sharedpath").rstrip('\n') |
|
444 | 445 | if 'relshared' in self.requirements: |
|
445 | 446 | sharedpath = self.vfs.join(sharedpath) |
|
446 | 447 | vfs = vfsmod.vfs(sharedpath, realpath=True) |
|
447 | 448 | cachepath = vfs.join('cache') |
|
448 | 449 | s = vfs.base |
|
449 | 450 | if not vfs.exists(): |
|
450 | 451 | raise error.RepoError( |
|
451 | 452 | _('.hg/sharedpath points to nonexistent directory %s') % s) |
|
452 | 453 | self.sharedpath = s |
|
453 | 454 | except IOError as inst: |
|
454 | 455 | if inst.errno != errno.ENOENT: |
|
455 | 456 | raise |
|
456 | 457 | |
|
457 | 458 | if 'exp-sparse' in self.requirements and not sparse.enabled: |
|
458 | 459 | raise error.RepoError(_('repository is using sparse feature but ' |
|
459 | 460 | 'sparse is not enabled; enable the ' |
|
460 | 461 | '"sparse" extension to access'))
|
461 | 462 | |
|
462 | 463 | self.store = store.store( |
|
463 | 464 | self.requirements, self.sharedpath, |
|
464 | 465 | lambda base: vfsmod.vfs(base, cacheaudited=True)) |
|
465 | 466 | self.spath = self.store.path |
|
466 | 467 | self.svfs = self.store.vfs |
|
467 | 468 | self.sjoin = self.store.join |
|
468 | 469 | self.vfs.createmode = self.store.createmode |
|
469 | 470 | self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True) |
|
470 | 471 | self.cachevfs.createmode = self.store.createmode |
|
471 | 472 | if (self.ui.configbool('devel', 'all-warnings') or |
|
472 | 473 | self.ui.configbool('devel', 'check-locks')): |
|
473 | 474 | if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs |
|
474 | 475 | self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit) |
|
475 | 476 | else: # standard vfs |
|
476 | 477 | self.svfs.audit = self._getsvfsward(self.svfs.audit) |
|
477 | 478 | self._applyopenerreqs() |
|
478 | 479 | if create: |
|
479 | 480 | self._writerequirements() |
|
480 | 481 | |
|
481 | 482 | self._dirstatevalidatewarned = False |
|
482 | 483 | |
|
483 | 484 | self._branchcaches = {} |
|
484 | 485 | self._revbranchcache = None |
|
485 | 486 | self.filterpats = {} |
|
486 | 487 | self._datafilters = {} |
|
487 | 488 | self._transref = self._lockref = self._wlockref = None |
|
488 | 489 | |
|
489 | 490 | # A cache for various files under .hg/ that tracks file changes, |
|
490 | 491 | # (used by the filecache decorator) |
|
491 | 492 | # |
|
492 | 493 | # Maps a property name to its util.filecacheentry |
|
493 | 494 | self._filecache = {} |
|
494 | 495 | |
|
495 | 496 | # hold sets of revision to be filtered |
|
496 | 497 | # should be cleared when something might have changed the filter value: |
|
497 | 498 | # - new changesets, |
|
498 | 499 | # - phase change, |
|
499 | 500 | # - new obsolescence marker, |
|
500 | 501 | # - working directory parent change, |
|
501 | 502 | # - bookmark changes |
|
502 | 503 | self.filteredrevcache = {} |
|
503 | 504 | |
|
504 | 505 | # post-dirstate-status hooks |
|
505 | 506 | self._postdsstatus = [] |
|
506 | 507 | |
|
507 | 508 | # generic mapping between names and nodes |
|
508 | 509 | self.names = namespaces.namespaces() |
|
509 | 510 | |
|
510 | 511 | # Key to signature value. |
|
511 | 512 | self._sparsesignaturecache = {} |
|
512 | 513 | # Signature to cached matcher instance. |
|
513 | 514 | self._sparsematchercache = {} |
|
514 | 515 | |
|
515 | 516 | def _getvfsward(self, origfunc): |
|
516 | 517 | """build a ward for self.vfs""" |
|
517 | 518 | rref = weakref.ref(self) |
|
518 | 519 | def checkvfs(path, mode=None): |
|
519 | 520 | ret = origfunc(path, mode=mode) |
|
520 | 521 | repo = rref() |
|
521 | 522 | if (repo is None |
|
522 | 523 | or not util.safehasattr(repo, '_wlockref') |
|
523 | 524 | or not util.safehasattr(repo, '_lockref')): |
|
524 | 525 | return |
|
525 | 526 | if mode in (None, 'r', 'rb'): |
|
526 | 527 | return |
|
527 | 528 | if path.startswith(repo.path): |
|
528 | 529 | # truncate name relative to the repository (.hg) |
|
529 | 530 | path = path[len(repo.path) + 1:] |
|
530 | 531 | if path.startswith('cache/'): |
|
531 | 532 | msg = 'accessing cache with vfs instead of cachevfs: "%s"' |
|
532 | 533 | repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs") |
|
533 | 534 | if path.startswith('journal.'): |
|
534 | 535 | # journal is covered by 'lock' |
|
535 | 536 | if repo._currentlock(repo._lockref) is None: |
|
536 | 537 | repo.ui.develwarn('write with no lock: "%s"' % path, |
|
537 | 538 | stacklevel=2, config='check-locks') |
|
538 | 539 | elif repo._currentlock(repo._wlockref) is None: |
|
539 | 540 | # the rest of the vfs files are covered by 'wlock'
|
540 | 541 | # |
|
541 | 542 | # exclude special files |
|
542 | 543 | for prefix in self._wlockfreeprefix: |
|
543 | 544 | if path.startswith(prefix): |
|
544 | 545 | return |
|
545 | 546 | repo.ui.develwarn('write with no wlock: "%s"' % path, |
|
546 | 547 | stacklevel=2, config='check-locks') |
|
547 | 548 | return ret |
|
548 | 549 | return checkvfs |
|
549 | 550 | |
|
550 | 551 | def _getsvfsward(self, origfunc): |
|
551 | 552 | """build a ward for self.svfs""" |
|
552 | 553 | rref = weakref.ref(self) |
|
553 | 554 | def checksvfs(path, mode=None): |
|
554 | 555 | ret = origfunc(path, mode=mode) |
|
555 | 556 | repo = rref() |
|
556 | 557 | if repo is None or not util.safehasattr(repo, '_lockref'): |
|
557 | 558 | return |
|
558 | 559 | if mode in (None, 'r', 'rb'): |
|
559 | 560 | return |
|
560 | 561 | if path.startswith(repo.sharedpath): |
|
561 | 562 | # truncate name relative to the repository (.hg) |
|
562 | 563 | path = path[len(repo.sharedpath) + 1:] |
|
563 | 564 | if repo._currentlock(repo._lockref) is None: |
|
564 | 565 | repo.ui.develwarn('write with no lock: "%s"' % path, |
|
565 | 566 | stacklevel=3) |
|
566 | 567 | return ret |
|
567 | 568 | return checksvfs |
|
568 | 569 | |
|
569 | 570 | def close(self): |
|
570 | 571 | self._writecaches() |
|
571 | 572 | |
|
572 | 573 | def _loadextensions(self): |
|
573 | 574 | extensions.loadall(self.ui) |
|
574 | 575 | |
|
575 | 576 | def _writecaches(self): |
|
576 | 577 | if self._revbranchcache: |
|
577 | 578 | self._revbranchcache.write() |
|
578 | 579 | |
|
579 | 580 | def _restrictcapabilities(self, caps): |
|
580 | 581 | if self.ui.configbool('experimental', 'bundle2-advertise'): |
|
581 | 582 | caps = set(caps) |
|
582 | 583 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(self, |
|
583 | 584 | role='client')) |
|
584 | 585 | caps.add('bundle2=' + urlreq.quote(capsblob)) |
|
585 | 586 | return caps |
|
586 | 587 | |
|
587 | 588 | def _applyopenerreqs(self): |
|
588 | 589 | self.svfs.options = dict((r, 1) for r in self.requirements |
|
589 | 590 | if r in self.openerreqs) |
|
590 | 591 | # experimental config: format.chunkcachesize |
|
591 | 592 | chunkcachesize = self.ui.configint('format', 'chunkcachesize') |
|
592 | 593 | if chunkcachesize is not None: |
|
593 | 594 | self.svfs.options['chunkcachesize'] = chunkcachesize |
|
594 | 595 | # experimental config: format.maxchainlen |
|
595 | 596 | maxchainlen = self.ui.configint('format', 'maxchainlen') |
|
596 | 597 | if maxchainlen is not None: |
|
597 | 598 | self.svfs.options['maxchainlen'] = maxchainlen |
|
598 | 599 | # experimental config: format.manifestcachesize |
|
599 | 600 | manifestcachesize = self.ui.configint('format', 'manifestcachesize') |
|
600 | 601 | if manifestcachesize is not None: |
|
601 | 602 | self.svfs.options['manifestcachesize'] = manifestcachesize |
|
602 | 603 | # experimental config: format.aggressivemergedeltas |
|
603 | 604 | aggressivemergedeltas = self.ui.configbool('format', |
|
604 | 605 | 'aggressivemergedeltas') |
|
605 | 606 | self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas |
|
606 | 607 | self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui) |
|
607 | 608 | chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan') |
|
608 | 609 | if 0 <= chainspan: |
|
609 | 610 | self.svfs.options['maxdeltachainspan'] = chainspan |
|
610 | 611 | mmapindexthreshold = self.ui.configbytes('experimental', |
|
611 | 612 | 'mmapindexthreshold') |
|
612 | 613 | if mmapindexthreshold is not None: |
|
613 | 614 | self.svfs.options['mmapindexthreshold'] = mmapindexthreshold |
|
614 | 615 | withsparseread = self.ui.configbool('experimental', 'sparse-read') |
|
615 | 616 | srdensitythres = float(self.ui.config('experimental', |
|
616 | 617 | 'sparse-read.density-threshold')) |
|
617 | 618 | srmingapsize = self.ui.configbytes('experimental', |
|
618 | 619 | 'sparse-read.min-gap-size') |
|
619 | 620 | self.svfs.options['with-sparse-read'] = withsparseread |
|
620 | 621 | self.svfs.options['sparse-read-density-threshold'] = srdensitythres |
|
621 | 622 | self.svfs.options['sparse-read-min-gap-size'] = srmingapsize |
|
622 | 623 | |
|
623 | 624 | for r in self.requirements: |
|
624 | 625 | if r.startswith('exp-compression-'): |
|
625 | 626 | self.svfs.options['compengine'] = r[len('exp-compression-'):] |
|
626 | 627 | |
|
627 | 628 | # TODO move "revlogv2" to openerreqs once finalized. |
|
628 | 629 | if REVLOGV2_REQUIREMENT in self.requirements: |
|
629 | 630 | self.svfs.options['revlogv2'] = True |
|
630 | 631 | |
|
631 | 632 | def _writerequirements(self): |
|
632 | 633 | scmutil.writerequires(self.vfs, self.requirements) |
|
633 | 634 | |
|
634 | 635 | def _checknested(self, path): |
|
635 | 636 | """Determine if path is a legal nested repository.""" |
|
636 | 637 | if not path.startswith(self.root): |
|
637 | 638 | return False |
|
638 | 639 | subpath = path[len(self.root) + 1:] |
|
639 | 640 | normsubpath = util.pconvert(subpath) |
|
640 | 641 | |
|
641 | 642 | # XXX: Checking against the current working copy is wrong in |
|
642 | 643 | # the sense that it can reject things like |
|
643 | 644 | # |
|
644 | 645 | # $ hg cat -r 10 sub/x.txt |
|
645 | 646 | # |
|
646 | 647 | # if sub/ is no longer a subrepository in the working copy |
|
647 | 648 | # parent revision. |
|
648 | 649 | # |
|
649 | 650 | # However, it can of course also allow things that would have |
|
650 | 651 | # been rejected before, such as the above cat command if sub/ |
|
651 | 652 | # is a subrepository now, but was a normal directory before. |
|
652 | 653 | # The old path auditor would have rejected it by mistake since it
|
653 | 654 | # panics when it sees sub/.hg/. |
|
654 | 655 | # |
|
655 | 656 | # All in all, checking against the working copy seems sensible |
|
656 | 657 | # since we want to prevent access to nested repositories on |
|
657 | 658 | # the filesystem *now*. |
|
658 | 659 | ctx = self[None] |
|
659 | 660 | parts = util.splitpath(subpath) |
|
660 | 661 | while parts: |
|
661 | 662 | prefix = '/'.join(parts) |
|
662 | 663 | if prefix in ctx.substate: |
|
663 | 664 | if prefix == normsubpath: |
|
664 | 665 | return True |
|
665 | 666 | else: |
|
666 | 667 | sub = ctx.sub(prefix) |
|
667 | 668 | return sub.checknested(subpath[len(prefix) + 1:]) |
|
668 | 669 | else: |
|
669 | 670 | parts.pop() |
|
670 | 671 | return False |
|
671 | 672 | |
|
672 | 673 | def peer(self): |
|
673 | 674 | return localpeer(self) # not cached to avoid reference cycle |
|
674 | 675 | |
|
675 | 676 | def unfiltered(self): |
|
676 | 677 | """Return unfiltered version of the repository |
|
677 | 678 | |
|
678 | 679 | Intended to be overwritten by filtered repo.""" |
|
679 | 680 | return self |
|
680 | 681 | |
|
681 | 682 | def filtered(self, name, visibilityexceptions=None): |
|
682 | 683 | """Return a filtered version of a repository""" |
|
683 | 684 | cls = repoview.newtype(self.unfiltered().__class__) |
|
684 | 685 | return cls(self, name, visibilityexceptions) |
|
685 | 686 | |
|
686 | 687 | @repofilecache('bookmarks', 'bookmarks.current') |
|
687 | 688 | def _bookmarks(self): |
|
688 | 689 | return bookmarks.bmstore(self) |
|
689 | 690 | |
|
690 | 691 | @property |
|
691 | 692 | def _activebookmark(self): |
|
692 | 693 | return self._bookmarks.active |
|
693 | 694 | |
|
694 | 695 | # _phasesets depend on changelog. What we need is to call
|
695 | 696 | # _phasecache.invalidate() if '00changelog.i' was changed, but it |
|
696 | 697 | # can't be easily expressed in the filecache mechanism.
|
697 | 698 | @storecache('phaseroots', '00changelog.i') |
|
698 | 699 | def _phasecache(self): |
|
699 | 700 | return phases.phasecache(self, self._phasedefaults) |
|
700 | 701 | |
|
701 | 702 | @storecache('obsstore') |
|
702 | 703 | def obsstore(self): |
|
703 | 704 | return obsolete.makestore(self.ui, self) |
|
704 | 705 | |
|
705 | 706 | @storecache('00changelog.i') |
|
706 | 707 | def changelog(self): |
|
707 | 708 | return changelog.changelog(self.svfs, |
|
708 | 709 | trypending=txnutil.mayhavepending(self.root)) |
|
709 | 710 | |
|
710 | 711 | def _constructmanifest(self): |
|
711 | 712 | # This is a temporary function while we migrate from manifest to |
|
712 | 713 | # manifestlog. It allows bundlerepo and unionrepo to intercept the |
|
713 | 714 | # manifest creation. |
|
714 | 715 | return manifest.manifestrevlog(self.svfs) |
|
715 | 716 | |
|
716 | 717 | @storecache('00manifest.i') |
|
717 | 718 | def manifestlog(self): |
|
718 | 719 | return manifest.manifestlog(self.svfs, self) |
|
719 | 720 | |
|
720 | 721 | @repofilecache('dirstate') |
|
721 | 722 | def dirstate(self): |
|
722 | 723 | sparsematchfn = lambda: sparse.matcher(self) |
|
723 | 724 | |
|
724 | 725 | return dirstate.dirstate(self.vfs, self.ui, self.root, |
|
725 | 726 | self._dirstatevalidate, sparsematchfn) |
|
726 | 727 | |
|
727 | 728 | def _dirstatevalidate(self, node): |
|
728 | 729 | try: |
|
729 | 730 | self.changelog.rev(node) |
|
730 | 731 | return node |
|
731 | 732 | except error.LookupError: |
|
732 | 733 | if not self._dirstatevalidatewarned: |
|
733 | 734 | self._dirstatevalidatewarned = True |
|
734 | 735 | self.ui.warn(_("warning: ignoring unknown" |
|
735 | 736 | " working parent %s!\n") % short(node)) |
|
736 | 737 | return nullid |
|
737 | 738 | |
|
738 | 739 | def __getitem__(self, changeid): |
|
739 | 740 | if changeid is None: |
|
740 | 741 | return context.workingctx(self) |
|
741 | 742 | if isinstance(changeid, slice): |
|
742 | 743 | # wdirrev isn't contiguous so the slice shouldn't include it |
|
743 | 744 | return [context.changectx(self, i) |
|
744 | 745 | for i in xrange(*changeid.indices(len(self))) |
|
745 | 746 | if i not in self.changelog.filteredrevs] |
|
746 | 747 | try: |
|
747 | 748 | return context.changectx(self, changeid) |
|
748 | 749 | except error.WdirUnsupported: |
|
749 | 750 | return context.workingctx(self) |
|
750 | 751 | |
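# A minimal usage sketch for __getitem__ (hypothetical caller code, not
# part of this change; ``repo`` is assumed to be an open repo instance):
#
#     wctx = repo[None]        # workingctx for the working directory
#     ctx = repo['tip']        # changectx for a rev number, node, or tag
#     ctxs = repo[0:5]         # list of changectx; filtered revs excluded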
|
751 | 752 | def __contains__(self, changeid): |
|
752 | 753 | """True if the given changeid exists |
|
753 | 754 | |
|
754 | 755 | error.LookupError is raised if an ambiguous node is specified.
|
755 | 756 | """ |
|
756 | 757 | try: |
|
757 | 758 | self[changeid] |
|
758 | 759 | return True |
|
759 | 760 | except error.RepoLookupError: |
|
760 | 761 | return False |
|
761 | 762 | |
|
762 | 763 | def __nonzero__(self): |
|
763 | 764 | return True |
|
764 | 765 | |
|
765 | 766 | __bool__ = __nonzero__ |
|
766 | 767 | |
|
767 | 768 | def __len__(self): |
|
768 | 769 | # no need to pay the cost of repoview.changelog |
|
769 | 770 | unfi = self.unfiltered() |
|
770 | 771 | return len(unfi.changelog) |
|
771 | 772 | |
|
772 | 773 | def __iter__(self): |
|
773 | 774 | return iter(self.changelog) |
|
774 | 775 | |
|
775 | 776 | def revs(self, expr, *args): |
|
776 | 777 | '''Find revisions matching a revset. |
|
777 | 778 | |
|
778 | 779 | The revset is specified as a string ``expr`` that may contain |
|
779 | 780 | %-formatting to escape certain types. See ``revsetlang.formatspec``. |
|
780 | 781 | |
|
781 | 782 | Revset aliases from the configuration are not expanded. To expand |
|
782 | 783 | user aliases, consider calling ``scmutil.revrange()`` or |
|
783 | 784 | ``repo.anyrevs([expr], user=True)``. |
|
784 | 785 | |
|
785 | 786 | Returns a revset.abstractsmartset, which is a list-like interface |
|
786 | 787 | that contains integer revisions. |
|
787 | 788 | ''' |
|
788 | 789 | expr = revsetlang.formatspec(expr, *args) |
|
789 | 790 | m = revset.match(None, expr) |
|
790 | 791 | return m(self) |
|
791 | 792 | |
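# A usage sketch for revs() (hypothetical caller; the %-escapes come from
# revsetlang.formatspec, e.g. %s for a string and %d for an int):
#
#     for rev in repo.revs('ancestors(%s) and not public()', 'tip'):
#         ...   # rev is an integer revision number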
|
792 | 793 | def set(self, expr, *args): |
|
793 | 794 | '''Find revisions matching a revset and emit changectx instances. |
|
794 | 795 | |
|
795 | 796 | This is a convenience wrapper around ``revs()`` that iterates the |
|
796 | 797 | result and is a generator of changectx instances. |
|
797 | 798 | |
|
798 | 799 | Revset aliases from the configuration are not expanded. To expand |
|
799 | 800 | user aliases, consider calling ``scmutil.revrange()``. |
|
800 | 801 | ''' |
|
801 | 802 | for r in self.revs(expr, *args): |
|
802 | 803 | yield self[r] |
|
803 | 804 | |
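# A usage sketch for set() (hypothetical caller): the same query API as
# revs(), but yielding changectx objects instead of integer revisions:
#
#     for ctx in repo.set('draft()'):
#         repo.ui.write('%s %s\n' % (ctx.hex(), ctx.description()))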
|
804 | 805 | def anyrevs(self, specs, user=False, localalias=None): |
|
805 | 806 | '''Find revisions matching one of the given revsets. |
|
806 | 807 | |
|
807 | 808 | Revset aliases from the configuration are not expanded by default. To |
|
808 | 809 | expand user aliases, specify ``user=True``. To provide some local |
|
809 | 810 | definitions overriding user aliases, set ``localalias`` to |
|
810 | 811 | ``{name: definitionstring}``. |
|
811 | 812 | ''' |
|
812 | 813 | if user: |
|
813 | 814 | m = revset.matchany(self.ui, specs, repo=self, |
|
814 | 815 | localalias=localalias) |
|
815 | 816 | else: |
|
816 | 817 | m = revset.matchany(None, specs, localalias=localalias) |
|
817 | 818 | return m(self) |
|
818 | 819 | |
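# A usage sketch for anyrevs() (hypothetical caller): expand user aliases
# while overriding one of them just for this call via ``localalias``:
#
#     revs = repo.anyrevs(['myalias', 'draft()'], user=True,
#                         localalias={'myalias': 'tip'})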
|
819 | 820 | def url(self): |
|
820 | 821 | return 'file:' + self.root |
|
821 | 822 | |
|
822 | 823 | def hook(self, name, throw=False, **args): |
|
823 | 824 | """Call a hook, passing this repo instance. |
|
824 | 825 | |
|
825 | 826 | This is a convenience method to aid invoking hooks. Extensions likely
|
826 | 827 | won't call this unless they have registered a custom hook or are |
|
827 | 828 | replacing code that is expected to call a hook. |
|
828 | 829 | """ |
|
829 | 830 | return hook.hook(self.ui, self, name, throw, **args) |
|
830 | 831 | |
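# A usage sketch (hypothetical extension code; the hook name is made up):
# run a custom hook and raise on failure; extra keyword arguments are
# exposed to shell hooks as HG_* environment variables:
#
#     repo.hook('myext-precommit', throw=True, source='commit')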
|
831 | 832 | @filteredpropertycache |
|
832 | 833 | def _tagscache(self): |
|
833 | 834 | '''Returns a tagscache object that contains various tags-related

834 | 835 | caches.'''
|
835 | 836 | |
|
836 | 837 | # This simplifies its cache management by having one decorated |
|
837 | 838 | # function (this one) and the rest simply fetch things from it. |
|
838 | 839 | class tagscache(object): |
|
839 | 840 | def __init__(self): |
|
840 | 841 | # These two define the set of tags for this repository. tags |
|
841 | 842 | # maps tag name to node; tagtypes maps tag name to 'global' or |
|
842 | 843 | # 'local'. (Global tags are defined by .hgtags across all |
|
843 | 844 | # heads, and local tags are defined in .hg/localtags.) |
|
844 | 845 | # They constitute the in-memory cache of tags. |
|
845 | 846 | self.tags = self.tagtypes = None |
|
846 | 847 | |
|
847 | 848 | self.nodetagscache = self.tagslist = None |
|
848 | 849 | |
|
849 | 850 | cache = tagscache() |
|
850 | 851 | cache.tags, cache.tagtypes = self._findtags() |
|
851 | 852 | |
|
852 | 853 | return cache |
|
853 | 854 | |
|
854 | 855 | def tags(self): |
|
855 | 856 | '''return a mapping of tag to node''' |
|
856 | 857 | t = {} |
|
857 | 858 | if self.changelog.filteredrevs: |
|
858 | 859 | tags, tt = self._findtags() |
|
859 | 860 | else: |
|
860 | 861 | tags = self._tagscache.tags |
|
861 | 862 | for k, v in tags.iteritems(): |
|
862 | 863 | try: |
|
863 | 864 | # ignore tags to unknown nodes |
|
864 | 865 | self.changelog.rev(v) |
|
865 | 866 | t[k] = v |
|
866 | 867 | except (error.LookupError, ValueError): |
|
867 | 868 | pass |
|
868 | 869 | return t |
|
869 | 870 | |
|
870 | 871 | def _findtags(self): |
|
871 | 872 | '''Do the hard work of finding tags. Return a pair of dicts |
|
872 | 873 | (tags, tagtypes) where tags maps tag name to node, and tagtypes |
|
873 | 874 | maps tag name to a string like \'global\' or \'local\'. |
|
874 | 875 | Subclasses or extensions are free to add their own tags, but |
|
875 | 876 | should be aware that the returned dicts will be retained for the |
|
876 | 877 | duration of the localrepo object.''' |
|
877 | 878 | |
|
878 | 879 | # XXX what tagtype should subclasses/extensions use? Currently |
|
879 | 880 | # mq and bookmarks add tags, but do not set the tagtype at all. |
|
880 | 881 | # Should each extension invent its own tag type? Should there |
|
881 | 882 | # be one tagtype for all such "virtual" tags? Or is the status |
|
882 | 883 | # quo fine? |
|
883 | 884 | |
|
884 | 885 | |
|
885 | 886 | # map tag name to (node, hist) |
|
886 | 887 | alltags = tagsmod.findglobaltags(self.ui, self) |
|
887 | 888 | # map tag name to tag type |
|
888 | 889 | tagtypes = dict((tag, 'global') for tag in alltags) |
|
889 | 890 | |
|
890 | 891 | tagsmod.readlocaltags(self.ui, self, alltags, tagtypes) |
|
891 | 892 | |
|
892 | 893 | # Build the return dicts. Have to re-encode tag names because |
|
893 | 894 | # the tags module always uses UTF-8 (in order not to lose info |
|
894 | 895 | # writing to the cache), but the rest of Mercurial wants them in |
|
895 | 896 | # local encoding. |
|
896 | 897 | tags = {} |
|
897 | 898 | for (name, (node, hist)) in alltags.iteritems(): |
|
898 | 899 | if node != nullid: |
|
899 | 900 | tags[encoding.tolocal(name)] = node |
|
900 | 901 | tags['tip'] = self.changelog.tip() |
|
901 | 902 | tagtypes = dict([(encoding.tolocal(name), value) |
|
902 | 903 | for (name, value) in tagtypes.iteritems()]) |
|
903 | 904 | return (tags, tagtypes) |
|
904 | 905 | |
|
905 | 906 | def tagtype(self, tagname): |
|
906 | 907 | ''' |
|
907 | 908 | return the type of the given tag. result can be: |
|
908 | 909 | |
|
909 | 910 | 'local' : a local tag |
|
910 | 911 | 'global' : a global tag |
|
911 | 912 | None : tag does not exist |
|
912 | 913 | ''' |
|
913 | 914 | |
|
914 | 915 | return self._tagscache.tagtypes.get(tagname) |
|
915 | 916 | |
|
916 | 917 | def tagslist(self): |
|
917 | 918 | '''return a list of tags ordered by revision''' |
|
918 | 919 | if not self._tagscache.tagslist: |
|
919 | 920 | l = [] |
|
920 | 921 | for t, n in self.tags().iteritems(): |
|
921 | 922 | l.append((self.changelog.rev(n), t, n)) |
|
922 | 923 | self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)] |
|
923 | 924 | |
|
924 | 925 | return self._tagscache.tagslist |
|
925 | 926 | |
|
926 | 927 | def nodetags(self, node): |
|
927 | 928 | '''return the tags associated with a node''' |
|
928 | 929 | if not self._tagscache.nodetagscache: |
|
929 | 930 | nodetagscache = {} |
|
930 | 931 | for t, n in self._tagscache.tags.iteritems(): |
|
931 | 932 | nodetagscache.setdefault(n, []).append(t) |
|
932 | 933 | for tags in nodetagscache.itervalues(): |
|
933 | 934 | tags.sort() |
|
934 | 935 | self._tagscache.nodetagscache = nodetagscache |
|
935 | 936 | return self._tagscache.nodetagscache.get(node, []) |
|
936 | 937 | |
|
937 | 938 | def nodebookmarks(self, node): |
|
938 | 939 | """return the list of bookmarks pointing to the specified node""" |
|
939 | 940 | marks = [] |
|
940 | 941 | for bookmark, n in self._bookmarks.iteritems(): |
|
941 | 942 | if n == node: |
|
942 | 943 | marks.append(bookmark) |
|
943 | 944 | return sorted(marks) |
|
944 | 945 | |
|
945 | 946 | def branchmap(self): |
|
946 | 947 | '''returns a dictionary {branch: [branchheads]} with branchheads |
|
947 | 948 | ordered by increasing revision number''' |
|
948 | 949 | branchmap.updatecache(self) |
|
949 | 950 | return self._branchcaches[self.filtername] |
|
950 | 951 | |
|
951 | 952 | @unfilteredmethod |
|
952 | 953 | def revbranchcache(self): |
|
953 | 954 | if not self._revbranchcache: |
|
954 | 955 | self._revbranchcache = branchmap.revbranchcache(self.unfiltered()) |
|
955 | 956 | return self._revbranchcache |
|
956 | 957 | |
|
957 | 958 | def branchtip(self, branch, ignoremissing=False): |
|
958 | 959 | '''return the tip node for a given branch |
|
959 | 960 | |
|
960 | 961 | If ignoremissing is True, then this method will not raise an error. |
|
961 | 962 | This is helpful for callers that only expect None for a missing branch |
|
962 | 963 | (e.g. namespace). |
|
963 | 964 | |
|
964 | 965 | ''' |
|
965 | 966 | try: |
|
966 | 967 | return self.branchmap().branchtip(branch) |
|
967 | 968 | except KeyError: |
|
968 | 969 | if not ignoremissing: |
|
969 | 970 | raise error.RepoLookupError(_("unknown branch '%s'") % branch) |
|
970 | 971 | else: |
|
971 | 972 | pass |
|
972 | 973 | |
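# A usage sketch (hypothetical caller): with ignoremissing=True the
# KeyError is swallowed and the method falls through, returning None:
#
#     node = repo.branchtip('default')                    # raises if unknown
#     node = repo.branchtip('maybe', ignoremissing=True)  # None if unknown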
|
973 | 974 | def lookup(self, key): |
|
974 | 975 | return self[key].node() |
|
975 | 976 | |
|
976 | 977 | def lookupbranch(self, key, remote=None): |
|
977 | 978 | repo = remote or self |
|
978 | 979 | if key in repo.branchmap(): |
|
979 | 980 | return key |
|
980 | 981 | |
|
981 | 982 | repo = (remote and remote.local()) and remote or self |
|
982 | 983 | return repo[key].branch() |
|
983 | 984 | |
|
984 | 985 | def known(self, nodes): |
|
985 | 986 | cl = self.changelog |
|
986 | 987 | nm = cl.nodemap |
|
987 | 988 | filtered = cl.filteredrevs |
|
988 | 989 | result = [] |
|
989 | 990 | for n in nodes: |
|
990 | 991 | r = nm.get(n) |
|
991 | 992 | resp = not (r is None or r in filtered) |
|
992 | 993 | result.append(resp) |
|
993 | 994 | return result |
|
994 | 995 | |
|
995 | 996 | def local(self): |
|
996 | 997 | return self |
|
997 | 998 | |
|
998 | 999 | def publishing(self): |
|
999 | 1000 | # it's safe (and desirable) to trust the publish flag unconditionally |
|
1000 | 1001 | # so that we don't finalize changes shared between users via ssh or nfs |
|
1001 | 1002 | return self.ui.configbool('phases', 'publish', untrusted=True) |
|
1002 | 1003 | |
|
1003 | 1004 | def cancopy(self): |
|
1004 | 1005 | # so statichttprepo's override of local() works |
|
1005 | 1006 | if not self.local(): |
|
1006 | 1007 | return False |
|
1007 | 1008 | if not self.publishing(): |
|
1008 | 1009 | return True |
|
1009 | 1010 | # if publishing we can't copy if there is filtered content |
|
1010 | 1011 | return not self.filtered('visible').changelog.filteredrevs |
|
1011 | 1012 | |
|
1012 | 1013 | def shared(self): |
|
1013 | 1014 | '''the type of shared repository (None if not shared)''' |
|
1014 | 1015 | if self.sharedpath != self.path: |
|
1015 | 1016 | return 'store' |
|
1016 | 1017 | return None |
|
1017 | 1018 | |
|
1018 | 1019 | def wjoin(self, f, *insidef): |
|
1019 | 1020 | return self.vfs.reljoin(self.root, f, *insidef) |
|
1020 | 1021 | |
|
1021 | 1022 | def file(self, f): |
|
1022 | 1023 | if f[0] == '/': |
|
1023 | 1024 | f = f[1:] |
|
1024 | 1025 | return filelog.filelog(self.svfs, f) |
|
1025 | 1026 | |
|
1026 | 1027 | def changectx(self, changeid): |
|
1027 | 1028 | return self[changeid] |
|
1028 | 1029 | |
|
1029 | 1030 | def setparents(self, p1, p2=nullid): |
|
1030 | 1031 | with self.dirstate.parentchange(): |
|
1031 | 1032 | copies = self.dirstate.setparents(p1, p2) |
|
1032 | 1033 | pctx = self[p1] |
|
1033 | 1034 | if copies: |
|
1034 | 1035 | # Adjust copy records; the dirstate cannot do it, as it

1035 | 1036 | # requires access to the parents' manifests. Preserve them

1036 | 1037 | # only for entries added to the first parent.
|
1037 | 1038 | for f in copies: |
|
1038 | 1039 | if f not in pctx and copies[f] in pctx: |
|
1039 | 1040 | self.dirstate.copy(copies[f], f) |
|
1040 | 1041 | if p2 == nullid: |
|
1041 | 1042 | for f, s in sorted(self.dirstate.copies().items()): |
|
1042 | 1043 | if f not in pctx and s not in pctx: |
|
1043 | 1044 | self.dirstate.copy(None, f) |
|
1044 | 1045 | |
|
1045 | 1046 | def filectx(self, path, changeid=None, fileid=None): |
|
1046 | 1047 | """changeid can be a changeset revision, node, or tag. |
|
1047 | 1048 | fileid can be a file revision or node.""" |
|
1048 | 1049 | return context.filectx(self, path, changeid, fileid) |
|
1049 | 1050 | |
|
1050 | 1051 | def getcwd(self): |
|
1051 | 1052 | return self.dirstate.getcwd() |
|
1052 | 1053 | |
|
1053 | 1054 | def pathto(self, f, cwd=None): |
|
1054 | 1055 | return self.dirstate.pathto(f, cwd) |
|
1055 | 1056 | |
|
1056 | 1057 | def _loadfilter(self, filter): |
|
1057 | 1058 | if filter not in self.filterpats: |
|
1058 | 1059 | l = [] |
|
1059 | 1060 | for pat, cmd in self.ui.configitems(filter): |
|
1060 | 1061 | if cmd == '!': |
|
1061 | 1062 | continue |
|
1062 | 1063 | mf = matchmod.match(self.root, '', [pat]) |
|
1063 | 1064 | fn = None |
|
1064 | 1065 | params = cmd |
|
1065 | 1066 | for name, filterfn in self._datafilters.iteritems(): |
|
1066 | 1067 | if cmd.startswith(name): |
|
1067 | 1068 | fn = filterfn |
|
1068 | 1069 | params = cmd[len(name):].lstrip() |
|
1069 | 1070 | break |
|
1070 | 1071 | if not fn: |
|
1071 | 1072 | fn = lambda s, c, **kwargs: util.filter(s, c) |
|
1072 | 1073 | # Wrap old filters not supporting keyword arguments |
|
1073 | 1074 | if not pycompat.getargspec(fn)[2]: |
|
1074 | 1075 | oldfn = fn |
|
1075 | 1076 | fn = lambda s, c, **kwargs: oldfn(s, c) |
|
1076 | 1077 | l.append((mf, fn, params)) |
|
1077 | 1078 | self.filterpats[filter] = l |
|
1078 | 1079 | return self.filterpats[filter] |
|
1079 | 1080 | |
|
1080 | 1081 | def _filter(self, filterpats, filename, data): |
|
1081 | 1082 | for mf, fn, cmd in filterpats: |
|
1082 | 1083 | if mf(filename): |
|
1083 | 1084 | self.ui.debug("filtering %s through %s\n" % (filename, cmd)) |
|
1084 | 1085 | data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) |
|
1085 | 1086 | break |
|
1086 | 1087 | |
|
1087 | 1088 | return data |
|
1088 | 1089 | |
|
1089 | 1090 | @unfilteredpropertycache |
|
1090 | 1091 | def _encodefilterpats(self): |
|
1091 | 1092 | return self._loadfilter('encode') |
|
1092 | 1093 | |
|
1093 | 1094 | @unfilteredpropertycache |
|
1094 | 1095 | def _decodefilterpats(self): |
|
1095 | 1096 | return self._loadfilter('decode') |
|
1096 | 1097 | |
|
1097 | 1098 | def adddatafilter(self, name, filter): |
|
1098 | 1099 | self._datafilters[name] = filter |
|
1099 | 1100 | |
|
1100 | 1101 | def wread(self, filename): |
|
1101 | 1102 | if self.wvfs.islink(filename): |
|
1102 | 1103 | data = self.wvfs.readlink(filename) |
|
1103 | 1104 | else: |
|
1104 | 1105 | data = self.wvfs.read(filename) |
|
1105 | 1106 | return self._filter(self._encodefilterpats, filename, data) |
|
1106 | 1107 | |
|
1107 | 1108 | def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs): |
|
1108 | 1109 | """write ``data`` into ``filename`` in the working directory |
|
1109 | 1110 | |
|
1110 | 1111 | This returns the length of the written (possibly decoded) data.
|
1111 | 1112 | """ |
|
1112 | 1113 | data = self._filter(self._decodefilterpats, filename, data) |
|
1113 | 1114 | if 'l' in flags: |
|
1114 | 1115 | self.wvfs.symlink(data, filename) |
|
1115 | 1116 | else: |
|
1116 | 1117 | self.wvfs.write(filename, data, backgroundclose=backgroundclose, |
|
1117 | 1118 | **kwargs) |
|
1118 | 1119 | if 'x' in flags: |
|
1119 | 1120 | self.wvfs.setflags(filename, False, True) |
|
1120 | 1121 | else: |
|
1121 | 1122 | self.wvfs.setflags(filename, False, False) |
|
1122 | 1123 | return len(data) |
|
1123 | 1124 | |
|
1124 | 1125 | def wwritedata(self, filename, data): |
|
1125 | 1126 | return self._filter(self._decodefilterpats, filename, data) |
|
1126 | 1127 | |
|
1127 | 1128 | def currenttransaction(self): |
|
1128 | 1129 | """return the current transaction or None if non exists""" |
|
1129 | 1130 | if self._transref: |
|
1130 | 1131 | tr = self._transref() |
|
1131 | 1132 | else: |
|
1132 | 1133 | tr = None |
|
1133 | 1134 | |
|
1134 | 1135 | if tr and tr.running(): |
|
1135 | 1136 | return tr |
|
1136 | 1137 | return None |
|
1137 | 1138 | |
|
1138 | 1139 | def transaction(self, desc, report=None): |
|
1139 | 1140 | if (self.ui.configbool('devel', 'all-warnings') |
|
1140 | 1141 | or self.ui.configbool('devel', 'check-locks')): |
|
1141 | 1142 | if self._currentlock(self._lockref) is None: |
|
1142 | 1143 | raise error.ProgrammingError('transaction requires locking') |
|
1143 | 1144 | tr = self.currenttransaction() |
|
1144 | 1145 | if tr is not None: |
|
1145 | 1146 | return tr.nest() |
|
1146 | 1147 | |
|
1147 | 1148 | # abort here if the journal already exists |
|
1148 | 1149 | if self.svfs.exists("journal"): |
|
1149 | 1150 | raise error.RepoError( |
|
1150 | 1151 | _("abandoned transaction found"), |
|
1151 | 1152 | hint=_("run 'hg recover' to clean up transaction")) |
|
1152 | 1153 | |
|
1153 | 1154 | idbase = "%.40f#%f" % (random.random(), time.time()) |
|
1154 | 1155 | ha = hex(hashlib.sha1(idbase).digest()) |
|
1155 | 1156 | txnid = 'TXN:' + ha |
|
1156 | 1157 | self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid) |
|
1157 | 1158 | |
|
1158 | 1159 | self._writejournal(desc) |
|
1159 | 1160 | renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()] |
|
1160 | 1161 | if report: |
|
1161 | 1162 | rp = report |
|
1162 | 1163 | else: |
|
1163 | 1164 | rp = self.ui.warn |
|
1164 | 1165 | vfsmap = {'plain': self.vfs} # root of .hg/ |
|
1165 | 1166 | # we must avoid a cyclic reference between repo and transaction.
|
1166 | 1167 | reporef = weakref.ref(self) |
|
1167 | 1168 | # Code to track tag movement |
|
1168 | 1169 | # |
|
1169 | 1170 | # Since tags are all handled as file content, it is actually quite hard

1170 | 1171 | # to track these movements from a code perspective. So we fall back to

1171 | 1172 | # tracking at the repository level. One could envision tracking changes

1172 | 1173 | # to the '.hgtags' file through changegroup application, but that fails

1173 | 1174 | # to cope with cases where a transaction exposes new heads without a

1174 | 1175 | # changegroup being involved (e.g. phase movement).

1175 | 1176 | #

1176 | 1177 | # For now, we gate the feature behind a flag since it likely comes

1177 | 1178 | # with performance impacts. The current code runs more often than needed

1178 | 1179 | # and does not use caches as much as it could. The current focus is on

1179 | 1180 | # the behavior of the feature, so we disable it by default. The flag

1180 | 1181 | # will be removed when we are happy with the performance impact.
|
1181 | 1182 | # |
|
1182 | 1183 | # Once this feature is no longer experimental, move the following
|
1183 | 1184 | # documentation to the appropriate help section: |
|
1184 | 1185 | # |
|
1185 | 1186 | # The ``HG_TAG_MOVED`` variable will be set if the transaction touched |
|
1186 | 1187 | # tags (new or changed or deleted tags). In addition the details of |
|
1187 | 1188 | # these changes are made available in a file at: |
|
1188 | 1189 | # ``REPOROOT/.hg/changes/tags.changes``. |
|
1189 | 1190 | # Make sure you check for HG_TAG_MOVED before reading that file, as it

1190 | 1191 | # might exist from a previous transaction even if no tags were touched

1191 | 1192 | # in this one. Changes are recorded in a line-based format::
|
1192 | 1193 | # |
|
1193 | 1194 | # <action> <hex-node> <tag-name>\n |
|
1194 | 1195 | # |
|
1195 | 1196 | # Actions are defined as follows:
|
1196 | 1197 | # "-R": tag is removed, |
|
1197 | 1198 | # "+A": tag is added, |
|
1198 | 1199 | # "-M": tag is moved (old value), |
|
1199 | 1200 | # "+M": tag is moved (new value), |
|
1200 | 1201 | tracktags = lambda x: None |
|
1201 | 1202 | # experimental config: experimental.hook-track-tags |
|
1202 | 1203 | shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags') |
|
1203 | 1204 | if desc != 'strip' and shouldtracktags: |
|
1204 | 1205 | oldheads = self.changelog.headrevs() |
|
1205 | 1206 | def tracktags(tr2): |
|
1206 | 1207 | repo = reporef() |
|
1207 | 1208 | oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads) |
|
1208 | 1209 | newheads = repo.changelog.headrevs() |
|
1209 | 1210 | newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads) |
|
1210 | 1211 | # note: we compare lists here.

1211 | 1212 | # As we do it only once, building a set would not be cheaper.
|
1212 | 1213 | changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes) |
|
1213 | 1214 | if changes: |
|
1214 | 1215 | tr2.hookargs['tag_moved'] = '1' |
|
1215 | 1216 | with repo.vfs('changes/tags.changes', 'w', |
|
1216 | 1217 | atomictemp=True) as changesfile: |
|
1217 | 1218 | # note: we do not register the file with the transaction

1218 | 1219 | # because we need it to still exist when the transaction

1219 | 1220 | # is closed (for txnclose hooks)
|
1220 | 1221 | tagsmod.writediff(changesfile, changes) |
|
1221 | 1222 | def validate(tr2): |
|
1222 | 1223 | """will run pre-closing hooks""" |
|
1223 | 1224 | # XXX the transaction API is a bit lacking here so we take a hacky |
|
1224 | 1225 | # path for now |
|
1225 | 1226 | # |
|
1226 | 1227 | # We cannot add this as a "pending" hook since the 'tr.hookargs'

1227 | 1228 | # dict is copied before these run. In addition we need the data

1228 | 1229 | # available to in-memory hooks too.
|
1229 | 1230 | # |
|
1230 | 1231 | # Moreover, we also need to make sure this runs before txnclose |
|
1231 | 1232 | # hooks and there is no "pending" mechanism that would execute |
|
1232 | 1233 | # logic only if hooks are about to run. |
|
1233 | 1234 | # |
|
1234 | 1235 | # Fixing this limitation of the transaction is also needed to track |
|
1235 | 1236 | # other families of changes (bookmarks, phases, obsolescence). |
|
1236 | 1237 | # |
|
1237 | 1238 | # This will have to be fixed before we remove the experimental |
|
1238 | 1239 | # gating. |
|
1239 | 1240 | tracktags(tr2) |
|
1240 | 1241 | repo = reporef() |
|
1241 | 1242 | if repo.ui.configbool('experimental', 'single-head-per-branch'): |
|
1242 | 1243 | scmutil.enforcesinglehead(repo, tr2, desc) |
|
1243 | 1244 | if hook.hashook(repo.ui, 'pretxnclose-bookmark'): |
|
1244 | 1245 | for name, (old, new) in sorted(tr.changes['bookmarks'].items()): |
|
1245 | 1246 | args = tr.hookargs.copy() |
|
1246 | 1247 | args.update(bookmarks.preparehookargs(name, old, new)) |
|
1247 | 1248 | repo.hook('pretxnclose-bookmark', throw=True, |
|
1248 | 1249 | txnname=desc, |
|
1249 | 1250 | **pycompat.strkwargs(args)) |
|
1250 | 1251 | if hook.hashook(repo.ui, 'pretxnclose-phase'): |
|
1251 | 1252 | cl = repo.unfiltered().changelog |
|
1252 | 1253 | for rev, (old, new) in tr.changes['phases'].items(): |
|
1253 | 1254 | args = tr.hookargs.copy() |
|
1254 | 1255 | node = hex(cl.node(rev)) |
|
1255 | 1256 | args.update(phases.preparehookargs(node, old, new)) |
|
1256 | 1257 | repo.hook('pretxnclose-phase', throw=True, txnname=desc, |
|
1257 | 1258 | **pycompat.strkwargs(args)) |
|
1258 | 1259 | |
|
1259 | 1260 | repo.hook('pretxnclose', throw=True, |
|
1260 | 1261 | txnname=desc, **pycompat.strkwargs(tr.hookargs)) |
|
1261 | 1262 | def releasefn(tr, success): |
|
1262 | 1263 | repo = reporef() |
|
1263 | 1264 | if success: |
|
1264 | 1265 | # this should be explicitly invoked here, because

1265 | 1266 | # in-memory changes aren't written out when closing

1266 | 1267 | # the transaction if tr.addfilegenerator (via

1267 | 1268 | # dirstate.write or so) wasn't invoked while the

1268 | 1269 | # transaction was running
|
1269 | 1270 | repo.dirstate.write(None) |
|
1270 | 1271 | else: |
|
1271 | 1272 | # discard all changes (including ones already written |
|
1272 | 1273 | # out) in this transaction |
|
1273 | 1274 | repo.dirstate.restorebackup(None, 'journal.dirstate') |
|
1274 | 1275 | |
|
1275 | 1276 | repo.invalidate(clearfilecache=True) |
|
1276 | 1277 | |
|
1277 | 1278 | tr = transaction.transaction(rp, self.svfs, vfsmap, |
|
1278 | 1279 | "journal", |
|
1279 | 1280 | "undo", |
|
1280 | 1281 | aftertrans(renames), |
|
1281 | 1282 | self.store.createmode, |
|
1282 | 1283 | validator=validate, |
|
1283 | 1284 | releasefn=releasefn, |
|
1284 | 1285 | checkambigfiles=_cachedfiles) |
|
1285 | 1286 | tr.changes['revs'] = xrange(0, 0) |
|
1286 | 1287 | tr.changes['obsmarkers'] = set() |
|
1287 | 1288 | tr.changes['phases'] = {} |
|
1288 | 1289 | tr.changes['bookmarks'] = {} |
|
1289 | 1290 | |
|
1290 | 1291 | tr.hookargs['txnid'] = txnid |
|
1291 | 1292 | # note: writing the fncache only during finalize means that the file is
|
1292 | 1293 | # outdated when running hooks. As fncache is used for streaming clone, |
|
1293 | 1294 | # this is not expected to break anything that happens during the hooks.
|
1294 | 1295 | tr.addfinalize('flush-fncache', self.store.write) |
|
1295 | 1296 | def txnclosehook(tr2): |
|
1296 | 1297 | """To be run if transaction is successful, will schedule a hook run |
|
1297 | 1298 | """ |
|
1298 | 1299 | # Don't reference tr2 in hook() so we don't hold a reference. |
|
1299 | 1300 | # This reduces memory consumption when there are multiple |
|
1300 | 1301 | # transactions per lock. This can likely go away if issue5045 |
|
1301 | 1302 | # fixes the function accumulation. |
|
1302 | 1303 | hookargs = tr2.hookargs |
|
1303 | 1304 | |
|
1304 | 1305 | def hookfunc(): |
|
1305 | 1306 | repo = reporef() |
|
1306 | 1307 | if hook.hashook(repo.ui, 'txnclose-bookmark'): |
|
1307 | 1308 | bmchanges = sorted(tr.changes['bookmarks'].items()) |
|
1308 | 1309 | for name, (old, new) in bmchanges: |
|
1309 | 1310 | args = tr.hookargs.copy() |
|
1310 | 1311 | args.update(bookmarks.preparehookargs(name, old, new)) |
|
1311 | 1312 | repo.hook('txnclose-bookmark', throw=False, |
|
1312 | 1313 | txnname=desc, **pycompat.strkwargs(args)) |
|
1313 | 1314 | |
|
1314 | 1315 | if hook.hashook(repo.ui, 'txnclose-phase'): |
|
1315 | 1316 | cl = repo.unfiltered().changelog |
|
1316 | 1317 | phasemv = sorted(tr.changes['phases'].items()) |
|
1317 | 1318 | for rev, (old, new) in phasemv: |
|
1318 | 1319 | args = tr.hookargs.copy() |
|
1319 | 1320 | node = hex(cl.node(rev)) |
|
1320 | 1321 | args.update(phases.preparehookargs(node, old, new)) |
|
1321 | 1322 | repo.hook('txnclose-phase', throw=False, txnname=desc, |
|
1322 | 1323 | **pycompat.strkwargs(args)) |
|
1323 | 1324 | |
|
1324 | 1325 | repo.hook('txnclose', throw=False, txnname=desc, |
|
1325 | 1326 | **pycompat.strkwargs(hookargs)) |
|
1326 | 1327 | reporef()._afterlock(hookfunc) |
|
1327 | 1328 | tr.addfinalize('txnclose-hook', txnclosehook) |
|
1328 | 1329 | # Include a leading "-" to make it run before the transaction summary
|
1329 | 1330 | # reports registered via scmutil.registersummarycallback() whose names |
|
1330 | 1331 | # are 00-txnreport etc. That way, the caches will be warm when the |
|
1331 | 1332 | # callbacks run. |
|
1332 | 1333 | tr.addpostclose('-warm-cache', self._buildcacheupdater(tr)) |
|
1333 | 1334 | def txnaborthook(tr2): |
|
1334 | 1335 | """To be run if transaction is aborted |
|
1335 | 1336 | """ |
|
1336 | 1337 | reporef().hook('txnabort', throw=False, txnname=desc, |
|
1337 | 1338 | **pycompat.strkwargs(tr2.hookargs)) |
|
1338 | 1339 | tr.addabort('txnabort-hook', txnaborthook) |
|
1339 | 1340 | # avoid eager cache invalidation. In-memory data should be identical

1340 | 1341 | # to stored data if the transaction has no error.
|
1341 | 1342 | tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats) |
|
1342 | 1343 | self._transref = weakref.ref(tr) |
|
1343 | 1344 | scmutil.registersummarycallback(self, tr, desc) |
|
1344 | 1345 | return tr |
|
1345 | 1346 | |
|
1346 | 1347 | def _journalfiles(self): |
|
1347 | 1348 | return ((self.svfs, 'journal'), |
|
1348 | 1349 | (self.vfs, 'journal.dirstate'), |
|
1349 | 1350 | (self.vfs, 'journal.branch'), |
|
1350 | 1351 | (self.vfs, 'journal.desc'), |
|
1351 | 1352 | (self.vfs, 'journal.bookmarks'), |
|
1352 | 1353 | (self.svfs, 'journal.phaseroots')) |
|
1353 | 1354 | |
|
1354 | 1355 | def undofiles(self): |
|
1355 | 1356 | return [(vfs, undoname(x)) for vfs, x in self._journalfiles()] |
|
1356 | 1357 | |
|
1357 | 1358 | @unfilteredmethod |
|
1358 | 1359 | def _writejournal(self, desc): |
|
1359 | 1360 | self.dirstate.savebackup(None, 'journal.dirstate') |
|
1360 | 1361 | self.vfs.write("journal.branch", |
|
1361 | 1362 | encoding.fromlocal(self.dirstate.branch())) |
|
1362 | 1363 | self.vfs.write("journal.desc", |
|
1363 | 1364 | "%d\n%s\n" % (len(self), desc)) |
|
1364 | 1365 | self.vfs.write("journal.bookmarks", |
|
1365 | 1366 | self.vfs.tryread("bookmarks")) |
|
1366 | 1367 | self.svfs.write("journal.phaseroots", |
|
1367 | 1368 | self.svfs.tryread("phaseroots")) |
|
1368 | 1369 | |
|
1369 | 1370 | def recover(self): |
|
1370 | 1371 | with self.lock(): |
|
1371 | 1372 | if self.svfs.exists("journal"): |
|
1372 | 1373 | self.ui.status(_("rolling back interrupted transaction\n")) |
|
1373 | 1374 | vfsmap = {'': self.svfs, |
|
1374 | 1375 | 'plain': self.vfs,} |
|
1375 | 1376 | transaction.rollback(self.svfs, vfsmap, "journal", |
|
1376 | 1377 | self.ui.warn, |
|
1377 | 1378 | checkambigfiles=_cachedfiles) |
|
1378 | 1379 | self.invalidate() |
|
1379 | 1380 | return True |
|
1380 | 1381 | else: |
|
1381 | 1382 | self.ui.warn(_("no interrupted transaction available\n")) |
|
1382 | 1383 | return False |
|
1383 | 1384 | |
|
1384 | 1385 | def rollback(self, dryrun=False, force=False): |
|
1385 | 1386 | wlock = lock = dsguard = None |
|
1386 | 1387 | try: |
|
1387 | 1388 | wlock = self.wlock() |
|
1388 | 1389 | lock = self.lock() |
|
1389 | 1390 | if self.svfs.exists("undo"): |
|
1390 | 1391 | dsguard = dirstateguard.dirstateguard(self, 'rollback') |
|
1391 | 1392 | |
|
1392 | 1393 | return self._rollback(dryrun, force, dsguard) |
|
1393 | 1394 | else: |
|
1394 | 1395 | self.ui.warn(_("no rollback information available\n")) |
|
1395 | 1396 | return 1 |
|
1396 | 1397 | finally: |
|
1397 | 1398 | release(dsguard, lock, wlock) |
|
1398 | 1399 | |
|
1399 | 1400 | @unfilteredmethod # Until we get smarter cache management |
|
1400 | 1401 | def _rollback(self, dryrun, force, dsguard): |
|
1401 | 1402 | ui = self.ui |
|
1402 | 1403 | try: |
|
1403 | 1404 | args = self.vfs.read('undo.desc').splitlines() |
|
1404 | 1405 | (oldlen, desc, detail) = (int(args[0]), args[1], None) |
|
1405 | 1406 | if len(args) >= 3: |
|
1406 | 1407 | detail = args[2] |
|
1407 | 1408 | oldtip = oldlen - 1 |
|
1408 | 1409 | |
|
1409 | 1410 | if detail and ui.verbose: |
|
1410 | 1411 | msg = (_('repository tip rolled back to revision %d' |
|
1411 | 1412 | ' (undo %s: %s)\n') |
|
1412 | 1413 | % (oldtip, desc, detail)) |
|
1413 | 1414 | else: |
|
1414 | 1415 | msg = (_('repository tip rolled back to revision %d' |
|
1415 | 1416 | ' (undo %s)\n') |
|
1416 | 1417 | % (oldtip, desc)) |
|
1417 | 1418 | except IOError: |
|
1418 | 1419 | msg = _('rolling back unknown transaction\n') |
|
1419 | 1420 | desc = None |
|
1420 | 1421 | |
|
1421 | 1422 | if not force and self['.'] != self['tip'] and desc == 'commit': |
|
1422 | 1423 | raise error.Abort( |
|
1423 | 1424 | _('rollback of last commit while not checked out ' |
|
1424 | 1425 | 'may lose data'), hint=_('use -f to force')) |
|
1425 | 1426 | |
|
1426 | 1427 | ui.status(msg) |
|
1427 | 1428 | if dryrun: |
|
1428 | 1429 | return 0 |
|
1429 | 1430 | |
|
1430 | 1431 | parents = self.dirstate.parents() |
|
1431 | 1432 | self.destroying() |
|
1432 | 1433 | vfsmap = {'plain': self.vfs, '': self.svfs} |
|
1433 | 1434 | transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn, |
|
1434 | 1435 | checkambigfiles=_cachedfiles) |
|
1435 | 1436 | if self.vfs.exists('undo.bookmarks'): |
|
1436 | 1437 | self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True) |
|
1437 | 1438 | if self.svfs.exists('undo.phaseroots'): |
|
1438 | 1439 | self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True) |
|
1439 | 1440 | self.invalidate() |
|
1440 | 1441 | |
|
1441 | 1442 | parentgone = (parents[0] not in self.changelog.nodemap or |
|
1442 | 1443 | parents[1] not in self.changelog.nodemap) |
|
1443 | 1444 | if parentgone: |
|
1444 | 1445 | # prevent dirstateguard from overwriting the already restored one
|
1445 | 1446 | dsguard.close() |
|
1446 | 1447 | |
|
1447 | 1448 | self.dirstate.restorebackup(None, 'undo.dirstate') |
|
1448 | 1449 | try: |
|
1449 | 1450 | branch = self.vfs.read('undo.branch') |
|
1450 | 1451 | self.dirstate.setbranch(encoding.tolocal(branch)) |
|
1451 | 1452 | except IOError: |
|
1452 | 1453 | ui.warn(_('named branch could not be reset: ' |
|
1453 | 1454 | 'current branch is still \'%s\'\n') |
|
1454 | 1455 | % self.dirstate.branch()) |
|
1455 | 1456 | |
|
1456 | 1457 | parents = tuple([p.rev() for p in self[None].parents()]) |
|
1457 | 1458 | if len(parents) > 1: |
|
1458 | 1459 | ui.status(_('working directory now based on ' |
|
1459 | 1460 | 'revisions %d and %d\n') % parents) |
|
1460 | 1461 | else: |
|
1461 | 1462 | ui.status(_('working directory now based on ' |
|
1462 | 1463 | 'revision %d\n') % parents) |
|
1463 | 1464 | mergemod.mergestate.clean(self, self['.'].node()) |
|
1464 | 1465 | |
|
1465 | 1466 | # TODO: if we know which new heads may result from this rollback, pass |
|
1466 | 1467 | # them to destroy(), which will prevent the branchhead cache from being |
|
1467 | 1468 | # invalidated. |
|
1468 | 1469 | self.destroyed() |
|
1469 | 1470 | return 0 |
|
1470 | 1471 | |
|
1471 | 1472 | def _buildcacheupdater(self, newtransaction): |
|
1472 | 1473 | """called during transaction to build the callback updating cache |
|
1473 | 1474 | |
|
1474 | 1475 | Lives on the repository to help extensions that might want to augment
|
1475 | 1476 | this logic. For this purpose, the created transaction is passed to the |
|
1476 | 1477 | method. |
|
1477 | 1478 | """ |
|
1478 | 1479 | # we must avoid a cyclic reference between repo and transaction.
|
1479 | 1480 | reporef = weakref.ref(self) |
|
1480 | 1481 | def updater(tr): |
|
1481 | 1482 | repo = reporef() |
|
1482 | 1483 | repo.updatecaches(tr) |
|
1483 | 1484 | return updater |
|
1484 | 1485 | |
|
1485 | 1486 | @unfilteredmethod |
|
1486 | 1487 | def updatecaches(self, tr=None): |
|
1487 | 1488 | """warm appropriate caches |
|
1488 | 1489 | |
|
1489 | 1490 | If this function is called after a transaction has closed, the transaction

1490 | 1491 | will be available in the 'tr' argument. This can be used to selectively
|
1491 | 1492 | update caches relevant to the changes in that transaction. |
|
1492 | 1493 | """ |
|
1493 | 1494 | if tr is not None and tr.hookargs.get('source') == 'strip': |
|
1494 | 1495 | # During strip, many caches are invalid but |
|
1495 | 1496 | # a later call to `destroyed` will refresh them.
|
1496 | 1497 | return |
|
1497 | 1498 | |
|
1498 | 1499 | if tr is None or tr.changes['revs']: |
|
1499 | 1500 | # updating the unfiltered branchmap should refresh all the others, |
|
1500 | 1501 | self.ui.debug('updating the branch cache\n') |
|
1501 | 1502 | branchmap.updatecache(self.filtered('served')) |
|
1502 | 1503 | |
|
1503 | 1504 | def invalidatecaches(self): |
|
1504 | 1505 | |
|
1505 | 1506 | if '_tagscache' in vars(self): |
|
1506 | 1507 | # can't use delattr on proxy |
|
1507 | 1508 | del self.__dict__['_tagscache'] |
|
1508 | 1509 | |
|
1509 | 1510 | self.unfiltered()._branchcaches.clear() |
|
1510 | 1511 | self.invalidatevolatilesets() |
|
1511 | 1512 | self._sparsesignaturecache.clear() |
|
1512 | 1513 | |
|
1513 | 1514 | def invalidatevolatilesets(self): |
|
1514 | 1515 | self.filteredrevcache.clear() |
|
1515 | 1516 | obsolete.clearobscaches(self) |
|
1516 | 1517 | |
|
1517 | 1518 | def invalidatedirstate(self): |
|
1518 | 1519 | '''Invalidates the dirstate, causing the next call to dirstate |
|
1519 | 1520 | to check if it was modified since the last time it was read, |
|
1520 | 1521 | rereading it if it has. |
|
1521 | 1522 | |
|
1522 | 1523 | This differs from dirstate.invalidate() in that it doesn't always

1523 | 1524 | reread the dirstate. Use dirstate.invalidate() if you want to
|
1524 | 1525 | explicitly read the dirstate again (i.e. restoring it to a previous |
|
1525 | 1526 | known good state).''' |
|
1526 | 1527 | if hasunfilteredcache(self, 'dirstate'): |
|
1527 | 1528 | for k in self.dirstate._filecache: |
|
1528 | 1529 | try: |
|
1529 | 1530 | delattr(self.dirstate, k) |
|
1530 | 1531 | except AttributeError: |
|
1531 | 1532 | pass |
|
1532 | 1533 | delattr(self.unfiltered(), 'dirstate') |
|
1533 | 1534 | |
|
1534 | 1535 | def invalidate(self, clearfilecache=False): |
|
1535 | 1536 | '''Invalidates both store and non-store parts other than dirstate |
|
1536 | 1537 | |
|
1537 | 1538 | If a transaction is running, invalidation of the store is omitted,

1538 | 1539 | because discarding in-memory changes might cause inconsistency

1539 | 1540 | (e.g. an incomplete fncache causes unintentional failure, but

1540 | 1541 | a redundant one doesn't).
|
1541 | 1542 | ''' |
|
1542 | 1543 | unfiltered = self.unfiltered() # all file caches are stored unfiltered |
|
1543 | 1544 | for k in list(self._filecache.keys()): |
|
1544 | 1545 | # dirstate is invalidated separately in invalidatedirstate() |
|
1545 | 1546 | if k == 'dirstate': |
|
1546 | 1547 | continue |
|
1547 | 1548 | if (k == 'changelog' and |
|
1548 | 1549 | self.currenttransaction() and |
|
1549 | 1550 | self.changelog._delayed): |
|
1550 | 1551 | # The changelog object may store unwritten revisions. We don't |
|
1551 | 1552 | # want to lose them. |
|
1552 | 1553 | # TODO: Solve the problem instead of working around it. |
|
1553 | 1554 | continue |
|
1554 | 1555 | |
|
1555 | 1556 | if clearfilecache: |
|
1556 | 1557 | del self._filecache[k] |
|
1557 | 1558 | try: |
|
1558 | 1559 | delattr(unfiltered, k) |
|
1559 | 1560 | except AttributeError: |
|
1560 | 1561 | pass |
|
1561 | 1562 | self.invalidatecaches() |
|
1562 | 1563 | if not self.currenttransaction(): |
|
1563 | 1564 | # TODO: Changing contents of store outside transaction |
|
1564 | 1565 | # causes inconsistency. We should make in-memory store |
|
1565 | 1566 | # changes detectable, and abort if changed. |
|
1566 | 1567 | self.store.invalidatecaches() |
|
1567 | 1568 | |
|
1568 | 1569 | def invalidateall(self): |
|
1569 | 1570 | '''Fully invalidates both store and non-store parts, causing the |
|
1570 | 1571 | subsequent operation to reread any outside changes.''' |
|
1571 | 1572 | # extension should hook this to invalidate its caches |
|
1572 | 1573 | self.invalidate() |
|
1573 | 1574 | self.invalidatedirstate() |
|
1574 | 1575 | |
|
1575 | 1576 | @unfilteredmethod |
|
1576 | 1577 | def _refreshfilecachestats(self, tr): |
|
1577 | 1578 | """Reload stats of cached files so that they are flagged as valid""" |
|
1578 | 1579 | for k, ce in self._filecache.items(): |
|
1579 | 1580 | k = pycompat.sysstr(k) |
|
1580 | 1581 | if k == r'dirstate' or k not in self.__dict__: |
|
1581 | 1582 | continue |
|
1582 | 1583 | ce.refresh() |
|
1583 | 1584 | |
|
1584 | 1585 | def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc, |
|
1585 | 1586 | inheritchecker=None, parentenvvar=None): |
|
1586 | 1587 | parentlock = None |
|
1587 | 1588 | # the contents of parentenvvar are used by the underlying lock to |
|
1588 | 1589 | # determine whether it can be inherited |
|
1589 | 1590 | if parentenvvar is not None: |
|
1590 | 1591 | parentlock = encoding.environ.get(parentenvvar) |
|
1591 | 1592 | |
|
1592 | 1593 | timeout = 0 |
|
1593 | 1594 | warntimeout = 0 |
|
1594 | 1595 | if wait: |
|
1595 | 1596 | timeout = self.ui.configint("ui", "timeout") |
|
1596 | 1597 | warntimeout = self.ui.configint("ui", "timeout.warn") |
|
1597 | 1598 | |
|
1598 | 1599 | l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout, |
|
1599 | 1600 | releasefn=releasefn, |
|
1600 | 1601 | acquirefn=acquirefn, desc=desc, |
|
1601 | 1602 | inheritchecker=inheritchecker, |
|
1602 | 1603 | parentlock=parentlock) |
|
1603 | 1604 | return l |
|
1604 | 1605 | |
|
1605 | 1606 | def _afterlock(self, callback): |
|
1606 | 1607 | """add a callback to be run when the repository is fully unlocked |
|
1607 | 1608 | |
|
1608 | 1609 | The callback will be executed when the outermost lock is released |
|
1609 | 1610 | (with wlock being higher level than 'lock').""" |
|
1610 | 1611 | for ref in (self._wlockref, self._lockref): |
|
1611 | 1612 | l = ref and ref() |
|
1612 | 1613 | if l and l.held: |
|
1613 | 1614 | l.postrelease.append(callback) |
|
1614 | 1615 | break |
|
1615 | 1616 | else: # no lock has been found.
|
1616 | 1617 | callback() |
|
1617 | 1618 | |
|
1618 | 1619 | def lock(self, wait=True): |
|
1619 | 1620 | '''Lock the repository store (.hg/store) and return a weak reference |
|
1620 | 1621 | to the lock. Use this before modifying the store (e.g. committing or |
|
1621 | 1622 | stripping). If you are opening a transaction, get a lock as well.
|
1622 | 1623 | |
|
1623 | 1624 | If both 'lock' and 'wlock' must be acquired, ensure you always acquire

1624 | 1625 | 'wlock' first to avoid a deadlock hazard.'''
|
1625 | 1626 | l = self._currentlock(self._lockref) |
|
1626 | 1627 | if l is not None: |
|
1627 | 1628 | l.lock() |
|
1628 | 1629 | return l |
|
1629 | 1630 | |
|
1630 | 1631 | l = self._lock(self.svfs, "lock", wait, None, |
|
1631 | 1632 | self.invalidate, _('repository %s') % self.origroot) |
|
1632 | 1633 | self._lockref = weakref.ref(l) |
|
1633 | 1634 | return l |
|
1634 | 1635 | |
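# A usage sketch (hypothetical caller) honoring the documented ordering:
# take 'wlock' before 'lock', then open a transaction under both; the
# transaction description string is made up:
#
#     with repo.wlock(), repo.lock(), repo.transaction('my-change') as tr:
#         ...   # mutate the store here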
|
1635 | 1636 | def _wlockchecktransaction(self): |
|
1636 | 1637 | if self.currenttransaction() is not None: |
|
1637 | 1638 | raise error.LockInheritanceContractViolation( |
|
1638 | 1639 | 'wlock cannot be inherited in the middle of a transaction') |
|
1639 | 1640 | |
|
1640 | 1641 | def wlock(self, wait=True): |
|
1641 | 1642 | '''Lock the non-store parts of the repository (everything under |
|
1642 | 1643 | .hg except .hg/store) and return a weak reference to the lock. |
|
1643 | 1644 | |
|
1644 | 1645 | Use this before modifying files in .hg. |
|
1645 | 1646 | |
|
1646 | 1647 | If both 'lock' and 'wlock' must be acquired, ensure you always acquire

1647 | 1648 | 'wlock' first to avoid a deadlock hazard.'''
|
1648 | 1649 | l = self._wlockref and self._wlockref() |
|
1649 | 1650 | if l is not None and l.held: |
|
1650 | 1651 | l.lock() |
|
1651 | 1652 | return l |
|
1652 | 1653 | |
|
1653 | 1654 | # We do not need to check for non-waiting lock acquisition. Such |
|
1654 | 1655 | # acquisitions would not cause a deadlock, as they would just fail.
|
1655 | 1656 | if wait and (self.ui.configbool('devel', 'all-warnings') |
|
1656 | 1657 | or self.ui.configbool('devel', 'check-locks')): |
|
1657 | 1658 | if self._currentlock(self._lockref) is not None: |
|
1658 | 1659 | self.ui.develwarn('"wlock" acquired after "lock"') |
|
1659 | 1660 | |
|
1660 | 1661 | def unlock(): |
|
1661 | 1662 | if self.dirstate.pendingparentchange(): |
|
1662 | 1663 | self.dirstate.invalidate() |
|
1663 | 1664 | else: |
|
1664 | 1665 | self.dirstate.write(None) |
|
1665 | 1666 | |
|
1666 | 1667 | self._filecache['dirstate'].refresh() |
|
1667 | 1668 | |
|
1668 | 1669 | l = self._lock(self.vfs, "wlock", wait, unlock, |
|
1669 | 1670 | self.invalidatedirstate, _('working directory of %s') % |
|
1670 | 1671 | self.origroot, |
|
1671 | 1672 | inheritchecker=self._wlockchecktransaction, |
|
1672 | 1673 | parentenvvar='HG_WLOCK_LOCKER') |
|
1673 | 1674 | self._wlockref = weakref.ref(l) |
|
1674 | 1675 | return l |
|
1675 | 1676 | |
|
1676 | 1677 | def _currentlock(self, lockref): |
|
1677 | 1678 | """Returns the lock if it's held, or None if it's not.""" |
|
1678 | 1679 | if lockref is None: |
|
1679 | 1680 | return None |
|
1680 | 1681 | l = lockref() |
|
1681 | 1682 | if l is None or not l.held: |
|
1682 | 1683 | return None |
|
1683 | 1684 | return l |
|
1684 | 1685 | |
|
1685 | 1686 | def currentwlock(self): |
|
1686 | 1687 | """Returns the wlock if it's held, or None if it's not.""" |
|
1687 | 1688 | return self._currentlock(self._wlockref) |
|
1688 | 1689 | |
|
1689 | 1690 | def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist): |
|
1690 | 1691 | """ |
|
1691 | 1692 | commit an individual file as part of a larger transaction |
|
1692 | 1693 | """ |
|
1693 | 1694 | |
|
1694 | 1695 | fname = fctx.path() |
|
1695 | 1696 | fparent1 = manifest1.get(fname, nullid) |
|
1696 | 1697 | fparent2 = manifest2.get(fname, nullid) |
|
1697 | 1698 | if isinstance(fctx, context.filectx): |
|
1698 | 1699 | node = fctx.filenode() |
|
1699 | 1700 | if node in [fparent1, fparent2]: |
|
1700 | 1701 | self.ui.debug('reusing %s filelog entry\n' % fname) |
|
1701 | 1702 | if manifest1.flags(fname) != fctx.flags(): |
|
1702 | 1703 | changelist.append(fname) |
|
1703 | 1704 | return node |
|
1704 | 1705 | |
|
1705 | 1706 | flog = self.file(fname) |
|
1706 | 1707 | meta = {} |
|
1707 | 1708 | copy = fctx.renamed() |
|
1708 | 1709 | if copy and copy[0] != fname: |
|
1709 | 1710 | # Mark the new revision of this file as a copy of another |
|
1710 | 1711 | # file. This copy data will effectively act as a parent |
|
1711 | 1712 | # of this new revision. If this is a merge, the first |
|
1712 | 1713 | # parent will be the nullid (meaning "look up the copy data") |
|
1713 | 1714 | # and the second one will be the other parent. For example: |
|
1714 | 1715 | # |
|
1715 | 1716 | # 0 --- 1 --- 3 rev1 changes file foo |
|
1716 | 1717 | # \ / rev2 renames foo to bar and changes it |
|
1717 | 1718 | # \- 2 -/ rev3 should have bar with all changes and |
|
1718 | 1719 | # should record that bar descends from |
|
1719 | 1720 | # bar in rev2 and foo in rev1 |
|
1720 | 1721 | # |
|
1721 | 1722 | # this allows this merge to succeed: |
|
1722 | 1723 | # |
|
1723 | 1724 | # 0 --- 1 --- 3 rev4 reverts the content change from rev2 |
|
1724 | 1725 | # \ / merging rev3 and rev4 should use bar@rev2 |
|
1725 | 1726 | # \- 2 --- 4 as the merge base |
|
1726 | 1727 | # |
|
1727 | 1728 | |
|
1728 | 1729 | cfname = copy[0] |
|
1729 | 1730 | crev = manifest1.get(cfname) |
|
1730 | 1731 | newfparent = fparent2 |
|
1731 | 1732 | |
|
1732 | 1733 | if manifest2: # branch merge |
|
1733 | 1734 | if fparent2 == nullid or crev is None: # copied on remote side |
|
1734 | 1735 | if cfname in manifest2: |
|
1735 | 1736 | crev = manifest2[cfname] |
|
1736 | 1737 | newfparent = fparent1 |
|
1737 | 1738 | |
|
1738 | 1739 | # Here, we used to search backwards through history to try to find |
|
1739 | 1740 | # where the file copy came from if the source of a copy was not in |
|
1740 | 1741 | # the parent directory. However, this doesn't actually make sense to |
|
1741 | 1742 | # do (what does a copy from something not in your working copy even |
|
1742 | 1743 | # mean?) and it causes bugs (eg, issue4476). Instead, we will warn |
|
1743 | 1744 | # the user that copy information was dropped, so if they didn't |
|
1744 | 1745 | # expect this outcome it can be fixed, but this is the correct |
|
1745 | 1746 | # behavior in this circumstance. |
|
1746 | 1747 | |
|
1747 | 1748 | if crev: |
|
1748 | 1749 | self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev))) |
|
1749 | 1750 | meta["copy"] = cfname |
|
1750 | 1751 | meta["copyrev"] = hex(crev) |
|
1751 | 1752 | fparent1, fparent2 = nullid, newfparent |
|
1752 | 1753 | else: |
|
1753 | 1754 | self.ui.warn(_("warning: can't find ancestor for '%s' " |
|
1754 | 1755 | "copied from '%s'!\n") % (fname, cfname)) |
|
1755 | 1756 | |
|
1756 | 1757 | elif fparent1 == nullid: |
|
1757 | 1758 | fparent1, fparent2 = fparent2, nullid |
|
1758 | 1759 | elif fparent2 != nullid: |
|
1759 | 1760 | # is one parent an ancestor of the other? |
|
1760 | 1761 | fparentancestors = flog.commonancestorsheads(fparent1, fparent2) |
|
1761 | 1762 | if fparent1 in fparentancestors: |
|
1762 | 1763 | fparent1, fparent2 = fparent2, nullid |
|
1763 | 1764 | elif fparent2 in fparentancestors: |
|
1764 | 1765 | fparent2 = nullid |
|
1765 | 1766 | |
|
1766 | 1767 | # is the file changed? |
|
1767 | 1768 | text = fctx.data() |
|
1768 | 1769 | if fparent2 != nullid or flog.cmp(fparent1, text) or meta: |
|
1769 | 1770 | changelist.append(fname) |
|
1770 | 1771 | return flog.add(text, meta, tr, linkrev, fparent1, fparent2) |
|
1771 | 1772 | # are just the flags changed during merge? |
|
1772 | 1773 | elif fname in manifest1 and manifest1.flags(fname) != fctx.flags(): |
|
1773 | 1774 | changelist.append(fname) |
|
1774 | 1775 | |
|
1775 | 1776 | return fparent1 |
|
1776 | 1777 | |
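The parent-reduction rules at the end of _filecommit are dense; here is a condensed, self-contained restatement (illustrative only, with a pluggable ancestor query standing in for flog.commonancestorsheads):

    def reduceparents(commonancestorsheads, nullid, fp1, fp2):
        # drop a redundant filelog parent, mirroring the logic above
        if fp1 == nullid:
            return fp2, nullid
        if fp2 != nullid:
            heads = commonancestorsheads(fp1, fp2)
            if fp1 in heads:          # fp1 is an ancestor of fp2
                return fp2, nullid
            if fp2 in heads:          # fp2 is an ancestor of fp1
                return fp1, nullid
        return fp1, fp2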
|
1777 | 1778 | def checkcommitpatterns(self, wctx, vdirs, match, status, fail): |
|
1778 | 1779 | """check for commit arguments that aren't committable""" |
|
1779 | 1780 | if match.isexact() or match.prefix(): |
|
1780 | 1781 | matched = set(status.modified + status.added + status.removed) |
|
1781 | 1782 | |
|
1782 | 1783 | for f in match.files(): |
|
1783 | 1784 | f = self.dirstate.normalize(f) |
|
1784 | 1785 | if f == '.' or f in matched or f in wctx.substate: |
|
1785 | 1786 | continue |
|
1786 | 1787 | if f in status.deleted: |
|
1787 | 1788 | fail(f, _('file not found!')) |
|
1788 | 1789 | if f in vdirs: # visited directory |
|
1789 | 1790 | d = f + '/' |
|
1790 | 1791 | for mf in matched: |
|
1791 | 1792 | if mf.startswith(d): |
|
1792 | 1793 | break |
|
1793 | 1794 | else: |
|
1794 | 1795 | fail(f, _("no match under directory!")) |
|
1795 | 1796 | elif f not in self.dirstate: |
|
1796 | 1797 | fail(f, _("file not tracked!")) |
|
1797 | 1798 | |
|
1798 | 1799 | @unfilteredmethod |
|
1799 | 1800 | def commit(self, text="", user=None, date=None, match=None, force=False, |
|
1800 | 1801 | editor=False, extra=None): |
|
1801 | 1802 | """Add a new revision to current repository. |
|
1802 | 1803 | |
|
1803 | 1804 | Revision information is gathered from the working directory, |
|
1804 | 1805 | match can be used to filter the committed files. If editor is |
|
1805 | 1806 | supplied, it is called to get a commit message. |
|
1806 | 1807 | """ |
|
1807 | 1808 | if extra is None: |
|
1808 | 1809 | extra = {} |
|
1809 | 1810 | |
|
1810 | 1811 | def fail(f, msg): |
|
1811 | 1812 | raise error.Abort('%s: %s' % (f, msg)) |
|
1812 | 1813 | |
|
1813 | 1814 | if not match: |
|
1814 | 1815 | match = matchmod.always(self.root, '') |
|
1815 | 1816 | |
|
1816 | 1817 | if not force: |
|
1817 | 1818 | vdirs = [] |
|
1818 | 1819 | match.explicitdir = vdirs.append |
|
1819 | 1820 | match.bad = fail |
|
1820 | 1821 | |
|
1821 | 1822 | wlock = lock = tr = None |
|
1822 | 1823 | try: |
|
1823 | 1824 | wlock = self.wlock() |
|
1824 | 1825 | lock = self.lock() # for recent changelog (see issue4368) |
|
1825 | 1826 | |
|
1826 | 1827 | wctx = self[None] |
|
1827 | 1828 | merge = len(wctx.parents()) > 1 |
|
1828 | 1829 | |
|
1829 | 1830 | if not force and merge and not match.always(): |
|
1830 | 1831 | raise error.Abort(_('cannot partially commit a merge ' |
|
1831 | 1832 | '(do not specify files or patterns)')) |
|
1832 | 1833 | |
|
1833 | 1834 | status = self.status(match=match, clean=force) |
|
1834 | 1835 | if force: |
|
1835 | 1836 | status.modified.extend(status.clean) # mq may commit clean files |
|
1836 | 1837 | |
|
1837 | 1838 | # check subrepos |
|
1838 | 1839 | subs, commitsubs, newstate = subrepoutil.precommit( |
|
1839 | 1840 | self.ui, wctx, status, match, force=force) |
|
1840 | 1841 | |
|
1841 | 1842 | # make sure all explicit patterns are matched |
|
1842 | 1843 | if not force: |
|
1843 | 1844 | self.checkcommitpatterns(wctx, vdirs, match, status, fail) |
|
1844 | 1845 | |
|
1845 | 1846 | cctx = context.workingcommitctx(self, status, |
|
1846 | 1847 | text, user, date, extra) |
|
1847 | 1848 | |
|
1848 | 1849 | # internal config: ui.allowemptycommit |
|
1849 | 1850 | allowemptycommit = (wctx.branch() != wctx.p1().branch() |
|
1850 | 1851 | or extra.get('close') or merge or cctx.files() |
|
1851 | 1852 | or self.ui.configbool('ui', 'allowemptycommit')) |
|
1852 | 1853 | if not allowemptycommit: |
|
1853 | 1854 | return None |
|
1854 | 1855 | |
|
1855 | 1856 | if merge and cctx.deleted(): |
|
1856 | 1857 | raise error.Abort(_("cannot commit merge with missing files")) |
|
1857 | 1858 | |
|
1858 | 1859 | ms = mergemod.mergestate.read(self) |
|
1859 | 1860 | mergeutil.checkunresolved(ms) |
|
1860 | 1861 | |
|
1861 | 1862 | if editor: |
|
1862 | 1863 | cctx._text = editor(self, cctx, subs) |
|
1863 | 1864 | edited = (text != cctx._text) |
|
1864 | 1865 | |
|
1865 | 1866 | # Save commit message in case this transaction gets rolled back |
|
1866 | 1867 | # (e.g. by a pretxncommit hook). Leave the content alone on |
|
1867 | 1868 | # the assumption that the user will use the same editor again. |
|
1868 | 1869 | msgfn = self.savecommitmessage(cctx._text) |
|
1869 | 1870 | |
|
1870 | 1871 | # commit subs and write new state |
|
1871 | 1872 | if subs: |
|
1872 | 1873 | for s in sorted(commitsubs): |
|
1873 | 1874 | sub = wctx.sub(s) |
|
1874 | 1875 | self.ui.status(_('committing subrepository %s\n') % |
|
1875 | 1876 | subrepoutil.subrelpath(sub)) |
|
1876 | 1877 | sr = sub.commit(cctx._text, user, date) |
|
1877 | 1878 | newstate[s] = (newstate[s][0], sr) |
|
1878 | 1879 | subrepoutil.writestate(self, newstate) |
|
1879 | 1880 | |
|
1880 | 1881 | p1, p2 = self.dirstate.parents() |
|
1881 | 1882 | hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '') |
|
1882 | 1883 | try: |
|
1883 | 1884 | self.hook("precommit", throw=True, parent1=hookp1, |
|
1884 | 1885 | parent2=hookp2) |
|
1885 | 1886 | tr = self.transaction('commit') |
|
1886 | 1887 | ret = self.commitctx(cctx, True) |
|
1887 | 1888 | except: # re-raises |
|
1888 | 1889 | if edited: |
|
1889 | 1890 | self.ui.write( |
|
1890 | 1891 | _('note: commit message saved in %s\n') % msgfn) |
|
1891 | 1892 | raise |
|
1892 | 1893 | # update bookmarks, dirstate and mergestate |
|
1893 | 1894 | bookmarks.update(self, [p1, p2], ret) |
|
1894 | 1895 | cctx.markcommitted(ret) |
|
1895 | 1896 | ms.reset() |
|
1896 | 1897 | tr.close() |
|
1897 | 1898 | |
|
1898 | 1899 | finally: |
|
1899 | 1900 | lockmod.release(tr, lock, wlock) |
|
1900 | 1901 | |
|
1901 | 1902 | def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2): |
|
1902 | 1903 | # hack for commands that use a temporary commit (e.g. histedit):

1903 | 1904 | # the temporary commit may have been stripped before the hook runs
|
1904 | 1905 | if self.changelog.hasnode(ret): |
|
1905 | 1906 | self.hook("commit", node=node, parent1=parent1, |
|
1906 | 1907 | parent2=parent2) |
|
1907 | 1908 | self._afterlock(commithook) |
|
1908 | 1909 | return ret |
|
1909 | 1910 | |
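For orientation, a hypothetical caller (message text and arguments are illustrative): commit() returns the new changeset node, or None when there is nothing to commit and ui.allowemptycommit is unset.

    node = repo.commit(text='fix parser edge case',
                       user='alice <alice@example.com>', date='0 0')
    if node is None:
        repo.ui.status('nothing to commit\n')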
|
1910 | 1911 | @unfilteredmethod |
|
1911 | 1912 | def commitctx(self, ctx, error=False): |
|
1912 | 1913 | """Add a new revision to current repository. |
|
1913 | 1914 | Revision information is passed via the context argument. |
|
1914 | 1915 | """ |
|
1915 | 1916 | |
|
1916 | 1917 | tr = None |
|
1917 | 1918 | p1, p2 = ctx.p1(), ctx.p2() |
|
1918 | 1919 | user = ctx.user() |
|
1919 | 1920 | |
|
1920 | 1921 | lock = self.lock() |
|
1921 | 1922 | try: |
|
1922 | 1923 | tr = self.transaction("commit") |
|
1923 | 1924 | trp = weakref.proxy(tr) |
|
1924 | 1925 | |
|
1925 | 1926 | if ctx.manifestnode(): |
|
1926 | 1927 | # reuse an existing manifest revision |
|
1927 | 1928 | mn = ctx.manifestnode() |
|
1928 | 1929 | files = ctx.files() |
|
1929 | 1930 | elif ctx.files(): |
|
1930 | 1931 | m1ctx = p1.manifestctx() |
|
1931 | 1932 | m2ctx = p2.manifestctx() |
|
1932 | 1933 | mctx = m1ctx.copy() |
|
1933 | 1934 | |
|
1934 | 1935 | m = mctx.read() |
|
1935 | 1936 | m1 = m1ctx.read() |
|
1936 | 1937 | m2 = m2ctx.read() |
|
1937 | 1938 | |
|
1938 | 1939 | # check in files |
|
1939 | 1940 | added = [] |
|
1940 | 1941 | changed = [] |
|
1941 | 1942 | removed = list(ctx.removed()) |
|
1942 | 1943 | linkrev = len(self) |
|
1943 | 1944 | self.ui.note(_("committing files:\n")) |
|
1944 | 1945 | for f in sorted(ctx.modified() + ctx.added()): |
|
1945 | 1946 | self.ui.note(f + "\n") |
|
1946 | 1947 | try: |
|
1947 | 1948 | fctx = ctx[f] |
|
1948 | 1949 | if fctx is None: |
|
1949 | 1950 | removed.append(f) |
|
1950 | 1951 | else: |
|
1951 | 1952 | added.append(f) |
|
1952 | 1953 | m[f] = self._filecommit(fctx, m1, m2, linkrev, |
|
1953 | 1954 | trp, changed) |
|
1954 | 1955 | m.setflag(f, fctx.flags()) |
|
1955 | 1956 | except OSError as inst: |
|
1956 | 1957 | self.ui.warn(_("trouble committing %s!\n") % f) |
|
1957 | 1958 | raise |
|
1958 | 1959 | except IOError as inst: |
|
1959 | 1960 | errcode = getattr(inst, 'errno', errno.ENOENT) |
|
1960 | 1961 | if error or errcode and errcode != errno.ENOENT: |
|
1961 | 1962 | self.ui.warn(_("trouble committing %s!\n") % f) |
|
1962 | 1963 | raise |
|
1963 | 1964 | |
|
1964 | 1965 | # update manifest |
|
1965 | 1966 | self.ui.note(_("committing manifest\n")) |
|
1966 | 1967 | removed = [f for f in sorted(removed) if f in m1 or f in m2] |
|
1967 | 1968 | drop = [f for f in removed if f in m] |
|
1968 | 1969 | for f in drop: |
|
1969 | 1970 | del m[f] |
|
1970 | 1971 | mn = mctx.write(trp, linkrev, |
|
1971 | 1972 | p1.manifestnode(), p2.manifestnode(), |
|
1972 | 1973 | added, drop) |
|
1973 | 1974 | files = changed + removed |
|
1974 | 1975 | else: |
|
1975 | 1976 | mn = p1.manifestnode() |
|
1976 | 1977 | files = [] |
|
1977 | 1978 | |
|
1978 | 1979 | # update changelog |
|
1979 | 1980 | self.ui.note(_("committing changelog\n")) |
|
1980 | 1981 | self.changelog.delayupdate(tr) |
|
1981 | 1982 | n = self.changelog.add(mn, files, ctx.description(), |
|
1982 | 1983 | trp, p1.node(), p2.node(), |
|
1983 | 1984 | user, ctx.date(), ctx.extra().copy()) |
|
1984 | 1985 | xp1, xp2 = p1.hex(), p2 and p2.hex() or '' |
|
1985 | 1986 | self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, |
|
1986 | 1987 | parent2=xp2) |
|
1987 | 1988 | # set the new commit in its proper phase
|
1988 | 1989 | targetphase = subrepoutil.newcommitphase(self.ui, ctx) |
|
1989 | 1990 | if targetphase: |
|
1990 | 1991 | # retracting the boundary does not alter the parent changeset.

1991 | 1992 | # if a parent has a higher phase, the resulting phase will

1992 | 1993 | # be compliant anyway

1993 | 1994 | #

1994 | 1995 | # if the minimal phase were 0 we wouldn't need to retract anything
|
1995 | 1996 | phases.registernew(self, tr, targetphase, [n]) |
|
1996 | 1997 | tr.close() |
|
1997 | 1998 | return n |
|
1998 | 1999 | finally: |
|
1999 | 2000 | if tr: |
|
2000 | 2001 | tr.release() |
|
2001 | 2002 | lock.release() |
|
2002 | 2003 | |
|
2003 | 2004 | @unfilteredmethod |
|
2004 | 2005 | def destroying(self): |
|
2005 | 2006 | '''Inform the repository that nodes are about to be destroyed. |
|
2006 | 2007 | Intended for use by strip and rollback, so there's a common |
|
2007 | 2008 | place for anything that has to be done before destroying history. |
|
2008 | 2009 | |
|
2009 | 2010 | This is mostly useful for saving state that is in memory and waiting |
|
2010 | 2011 | to be flushed when the current lock is released. Because a call to |
|
2011 | 2012 | destroyed is imminent, the repo will be invalidated causing those |
|
2012 | 2013 | changes to stay in memory (waiting for the next unlock), or vanish |
|
2013 | 2014 | completely. |
|
2014 | 2015 | ''' |
|
2015 | 2016 | # When using the same lock to commit and strip, the phasecache is left |
|
2016 | 2017 | # dirty after committing. Then when we strip, the repo is invalidated, |
|
2017 | 2018 | # causing those changes to disappear. |
|
2018 | 2019 | if '_phasecache' in vars(self): |
|
2019 | 2020 | self._phasecache.write() |
|
2020 | 2021 | |
|
2021 | 2022 | @unfilteredmethod |
|
2022 | 2023 | def destroyed(self): |
|
2023 | 2024 | '''Inform the repository that nodes have been destroyed. |
|
2024 | 2025 | Intended for use by strip and rollback, so there's a common |
|
2025 | 2026 | place for anything that has to be done after destroying history. |
|
2026 | 2027 | ''' |
|
2027 | 2028 | # When one tries to: |
|
2028 | 2029 | # 1) destroy nodes thus calling this method (e.g. strip) |
|
2029 | 2030 | # 2) use phasecache somewhere (e.g. commit) |
|
2030 | 2031 | # |
|
2031 | 2032 | # then 2) will fail because the phasecache contains nodes that were |
|
2032 | 2033 | # removed. We can either remove phasecache from the filecache, |
|
2033 | 2034 | # causing it to reload next time it is accessed, or simply filter |
|
2034 | 2035 | # the removed nodes now and write the updated cache. |
|
2035 | 2036 | self._phasecache.filterunknown(self) |
|
2036 | 2037 | self._phasecache.write() |
|
2037 | 2038 | |
|
2038 | 2039 | # refresh all repository caches |
|
2039 | 2040 | self.updatecaches() |
|
2040 | 2041 | |
|
2041 | 2042 | # Ensure the persistent tag cache is updated. Doing it now |
|
2042 | 2043 | # means that the tag cache only has to worry about destroyed |
|
2043 | 2044 | # heads immediately after a strip/rollback. That in turn |
|
2044 | 2045 | # guarantees that "cachetip == currenttip" (comparing both rev |
|
2045 | 2046 | # and node) always means no nodes have been added or destroyed. |
|
2046 | 2047 | |
|
2047 | 2048 | # XXX this is suboptimal when qrefresh'ing: we strip the current |
|
2048 | 2049 | # head, refresh the tag cache, then immediately add a new head. |
|
2049 | 2050 | # But I think doing it this way is necessary for the "instant |
|
2050 | 2051 | # tag cache retrieval" case to work. |
|
2051 | 2052 | self.invalidate() |
|
2052 | 2053 | |
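Read together with destroying() above, the intended call sequence is roughly the following sketch of the strip/rollback contract (not literal strip code):

    repo.destroying()   # flush pending in-memory state (e.g. phasecache)
    # ... remove the stripped revisions from the revlogs ...
    repo.destroyed()    # filter caches, refresh them, invalidate the repo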
|
2053 | 2054 | def status(self, node1='.', node2=None, match=None, |
|
2054 | 2055 | ignored=False, clean=False, unknown=False, |
|
2055 | 2056 | listsubrepos=False): |
|
2056 | 2057 | '''a convenience method that calls node1.status(node2)''' |
|
2057 | 2058 | return self[node1].status(node2, match, ignored, clean, unknown, |
|
2058 | 2059 | listsubrepos) |
|
2059 | 2060 | |
|
2060 | 2061 | def addpostdsstatus(self, ps): |
|
2061 | 2062 | """Add a callback to run within the wlock, at the point at which status |
|
2062 | 2063 | fixups happen. |
|
2063 | 2064 | |
|
2064 | 2065 | On status completion, callback(wctx, status) will be called with the |
|
2065 | 2066 | wlock held, unless the dirstate has changed from underneath or the wlock |
|
2066 | 2067 | couldn't be grabbed. |
|
2067 | 2068 | |
|
2068 | 2069 | Callbacks should not capture and use a cached copy of the dirstate -- |
|
2069 | 2070 | it might change in the meanwhile. Instead, they should access the |
|
2070 | 2071 | dirstate via wctx.repo().dirstate. |
|
2071 | 2072 | |
|
2072 | 2073 | This list is emptied out after each status run -- extensions should |
|
2073 | 2074 | make sure it adds to this list each time dirstate.status is called. |
|
2074 | 2075 | Extensions should also make sure they don't call this for statuses |
|
2075 | 2076 | that don't involve the dirstate. |
|
2076 | 2077 | """ |
|
2077 | 2078 | |
|
2078 | 2079 | # The list is located here for uniqueness reasons -- it is actually |
|
2079 | 2080 | # managed by the workingctx, but that isn't unique per-repo. |
|
2080 | 2081 | self._postdsstatus.append(ps) |
|
2081 | 2082 | |
|
2082 | 2083 | def postdsstatus(self): |
|
2083 | 2084 | """Used by workingctx to get the list of post-dirstate-status hooks.""" |
|
2084 | 2085 | return self._postdsstatus |
|
2085 | 2086 | |
|
2086 | 2087 | def clearpostdsstatus(self): |
|
2087 | 2088 | """Used by workingctx to clear post-dirstate-status hooks.""" |
|
2088 | 2089 | del self._postdsstatus[:] |
|
2089 | 2090 | |
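A sketch of the callback contract described in addpostdsstatus (the callback body is hypothetical; the (wctx, status) signature comes from the docstring above):

    def _poststatus(wctx, status):
        # runs under wlock after status fixups; always go through
        # wctx.repo().dirstate rather than a cached dirstate
        wctx.repo().ui.debug('poststatus: %d modified\n'
                             % len(status.modified))

    repo.addpostdsstatus(_poststatus)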
|
2090 | 2091 | def heads(self, start=None): |
|
2091 | 2092 | if start is None: |
|
2092 | 2093 | cl = self.changelog |
|
2093 | 2094 | headrevs = reversed(cl.headrevs()) |
|
2094 | 2095 | return [cl.node(rev) for rev in headrevs] |
|
2095 | 2096 | |
|
2096 | 2097 | heads = self.changelog.heads(start) |
|
2097 | 2098 | # sort the output in rev descending order |
|
2098 | 2099 | return sorted(heads, key=self.changelog.rev, reverse=True) |
|
2099 | 2100 | |
|
2100 | 2101 | def branchheads(self, branch=None, start=None, closed=False): |
|
2101 | 2102 | '''return a (possibly filtered) list of heads for the given branch |
|
2102 | 2103 | |
|
2103 | 2104 | Heads are returned in topological order, from newest to oldest. |
|
2104 | 2105 | If branch is None, use the dirstate branch. |
|
2105 | 2106 | If start is not None, return only heads reachable from start. |
|
2106 | 2107 | If closed is True, return heads that are marked as closed as well. |
|
2107 | 2108 | ''' |
|
2108 | 2109 | if branch is None: |
|
2109 | 2110 | branch = self[None].branch() |
|
2110 | 2111 | branches = self.branchmap() |
|
2111 | 2112 | if branch not in branches: |
|
2112 | 2113 | return [] |
|
2113 | 2114 | # the cache returns heads ordered lowest to highest |
|
2114 | 2115 | bheads = list(reversed(branches.branchheads(branch, closed=closed))) |
|
2115 | 2116 | if start is not None: |
|
2116 | 2117 | # filter out the heads that cannot be reached from startrev |
|
2117 | 2118 | fbheads = set(self.changelog.nodesbetween([start], bheads)[2]) |
|
2118 | 2119 | bheads = [h for h in bheads if h in fbheads] |
|
2119 | 2120 | return bheads |
|
2120 | 2121 | |
|
2121 | 2122 | def branches(self, nodes): |
|
2122 | 2123 | if not nodes: |
|
2123 | 2124 | nodes = [self.changelog.tip()] |
|
2124 | 2125 | b = [] |
|
2125 | 2126 | for n in nodes: |
|
2126 | 2127 | t = n |
|
2127 | 2128 | while True: |
|
2128 | 2129 | p = self.changelog.parents(n) |
|
2129 | 2130 | if p[1] != nullid or p[0] == nullid: |
|
2130 | 2131 | b.append((t, n, p[0], p[1])) |
|
2131 | 2132 | break |
|
2132 | 2133 | n = p[0] |
|
2133 | 2134 | return b |
|
2134 | 2135 | |
|
2135 | 2136 | def between(self, pairs): |
|
2136 | 2137 | r = [] |
|
2137 | 2138 | |
|
2138 | 2139 | for top, bottom in pairs: |
|
2139 | 2140 | n, l, i = top, [], 0 |
|
2140 | 2141 | f = 1 |
|
2141 | 2142 | |
|
2142 | 2143 | while n != bottom and n != nullid: |
|
2143 | 2144 | p = self.changelog.parents(n)[0] |
|
2144 | 2145 | if i == f: |
|
2145 | 2146 | l.append(n) |
|
2146 | 2147 | f = f * 2 |
|
2147 | 2148 | n = p |
|
2148 | 2149 | i += 1 |
|
2149 | 2150 | |
|
2150 | 2151 | r.append(l) |
|
2151 | 2152 | |
|
2152 | 2153 | return r |
|
2153 | 2154 | |
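between() samples the first-parent chain at exponentially growing distances (1, 2, 4, 8, ...) from each top toward bottom, keeping each result logarithmic in the chain length. A condensed restatement with a pluggable parent function:

    def samplepoints(firstparent, top, bottom, nullid):
        n, out, i, f = top, [], 0, 1
        while n != bottom and n != nullid:
            p = firstparent(n)
            if i == f:            # keep nodes at distances 1, 2, 4, 8, ...
                out.append(n)
                f *= 2
            n = p
            i += 1
        return out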
|
2154 | 2155 | def checkpush(self, pushop): |
|
2155 | 2156 | """Extensions can override this function if additional checks have |
|
2156 | 2157 | to be performed before pushing, or call it if they override push |
|
2157 | 2158 | command. |
|
2158 | 2159 | """ |
|
2159 | 2160 | |
|
2160 | 2161 | @unfilteredpropertycache |
|
2161 | 2162 | def prepushoutgoinghooks(self): |
|
2162 | 2163 | """Return a util.hooks object; its hooks are called with a pushop

2163 | 2164 | (carrying repo, remote and outgoing) before pushing changesets.
|
2164 | 2165 | """ |
|
2165 | 2166 | return util.hooks() |
|
2166 | 2167 | |
|
2167 | 2168 | def pushkey(self, namespace, key, old, new): |
|
2168 | 2169 | try: |
|
2169 | 2170 | tr = self.currenttransaction() |
|
2170 | 2171 | hookargs = {} |
|
2171 | 2172 | if tr is not None: |
|
2172 | 2173 | hookargs.update(tr.hookargs) |
|
2173 | 2174 | hookargs = pycompat.strkwargs(hookargs) |
|
2174 | 2175 | hookargs[r'namespace'] = namespace |
|
2175 | 2176 | hookargs[r'key'] = key |
|
2176 | 2177 | hookargs[r'old'] = old |
|
2177 | 2178 | hookargs[r'new'] = new |
|
2178 | 2179 | self.hook('prepushkey', throw=True, **hookargs) |
|
2179 | 2180 | except error.HookAbort as exc: |
|
2180 | 2181 | self.ui.write_err(_("pushkey-abort: %s\n") % exc) |
|
2181 | 2182 | if exc.hint: |
|
2182 | 2183 | self.ui.write_err(_("(%s)\n") % exc.hint) |
|
2183 | 2184 | return False |
|
2184 | 2185 | self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key)) |
|
2185 | 2186 | ret = pushkey.push(self, namespace, key, old, new) |
|
2186 | 2187 | def runhook(): |
|
2187 | 2188 | self.hook('pushkey', namespace=namespace, key=key, old=old, new=new, |
|
2188 | 2189 | ret=ret) |
|
2189 | 2190 | self._afterlock(runhook) |
|
2190 | 2191 | return ret |
|
2191 | 2192 | |
|
2192 | 2193 | def listkeys(self, namespace): |
|
2193 | 2194 | self.hook('prelistkeys', throw=True, namespace=namespace) |
|
2194 | 2195 | self.ui.debug('listing keys for "%s"\n' % namespace) |
|
2195 | 2196 | values = pushkey.list(self, namespace) |
|
2196 | 2197 | self.hook('listkeys', namespace=namespace, values=values) |
|
2197 | 2198 | return values |
|
2198 | 2199 | |
|
2199 | 2200 | def debugwireargs(self, one, two, three=None, four=None, five=None): |
|
2200 | 2201 | '''used to test argument passing over the wire''' |
|
2201 | 2202 | return "%s %s %s %s %s" % (one, two, three, four, five) |
|
2202 | 2203 | |
|
2203 | 2204 | def savecommitmessage(self, text): |
|
2204 | 2205 | fp = self.vfs('last-message.txt', 'wb') |
|
2205 | 2206 | try: |
|
2206 | 2207 | fp.write(text) |
|
2207 | 2208 | finally: |
|
2208 | 2209 | fp.close() |
|
2209 | 2210 | return self.pathto(fp.name[len(self.root) + 1:]) |
|
2210 | 2211 | |
|
2211 | 2212 | # used to avoid circular references so destructors work |
|
2212 | 2213 | def aftertrans(files): |
|
2213 | 2214 | renamefiles = [tuple(t) for t in files] |
|
2214 | 2215 | def a(): |
|
2215 | 2216 | for vfs, src, dest in renamefiles: |
|
2216 | 2217 | # if src and dest refer to the same file, vfs.rename is a no-op,
|
2217 | 2218 | # leaving both src and dest on disk. delete dest to make sure |
|
2218 | 2219 | # the rename couldn't be such a no-op. |
|
2219 | 2220 | vfs.tryunlink(dest) |
|
2220 | 2221 | try: |
|
2221 | 2222 | vfs.rename(src, dest) |
|
2222 | 2223 | except OSError: # journal file does not yet exist |
|
2223 | 2224 | pass |
|
2224 | 2225 | return a |
|
2225 | 2226 | |
|
2226 | 2227 | def undoname(fn): |
|
2227 | 2228 | base, name = os.path.split(fn) |
|
2228 | 2229 | assert name.startswith('journal') |
|
2229 | 2230 | return os.path.join(base, name.replace('journal', 'undo', 1)) |
|
2230 | 2231 | |
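For example (paths illustrative):

    # undoname('/repo/.hg/journal.dirstate') == '/repo/.hg/undo.dirstate'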
|
2231 | 2232 | def instance(ui, path, create): |
|
2232 | 2233 | return localrepository(ui, util.urllocalpath(path), create) |
|
2233 | 2234 | |
|
2234 | 2235 | def islocal(path): |
|
2235 | 2236 | return True |
|
2236 | 2237 | |
|
2237 | 2238 | def newreporequirements(repo): |
|
2238 | 2239 | """Determine the set of requirements for a new local repository. |
|
2239 | 2240 | |
|
2240 | 2241 | Extensions can wrap this function to specify custom requirements for |
|
2241 | 2242 | new repositories. |
|
2242 | 2243 | """ |
|
2243 | 2244 | ui = repo.ui |
|
2244 | 2245 | requirements = {'revlogv1'} |
|
2245 | 2246 | if ui.configbool('format', 'usestore'): |
|
2246 | 2247 | requirements.add('store') |
|
2247 | 2248 | if ui.configbool('format', 'usefncache'): |
|
2248 | 2249 | requirements.add('fncache') |
|
2249 | 2250 | if ui.configbool('format', 'dotencode'): |
|
2250 | 2251 | requirements.add('dotencode') |
|
2251 | 2252 | |
|
2252 | 2253 | compengine = ui.config('experimental', 'format.compression') |
|
2253 | 2254 | if compengine not in util.compengines: |
|
2254 | 2255 | raise error.Abort(_('compression engine %s defined by ' |
|
2255 | 2256 | 'experimental.format.compression not available') % |
|
2256 | 2257 | compengine, |
|
2257 | 2258 | hint=_('run "hg debuginstall" to list available ' |
|
2258 | 2259 | 'compression engines')) |
|
2259 | 2260 | |
|
2260 | 2261 | # zlib is the historical default and doesn't need an explicit requirement. |
|
2261 | 2262 | if compengine != 'zlib': |
|
2262 | 2263 | requirements.add('exp-compression-%s' % compengine) |
|
2263 | 2264 | |
|
2264 | 2265 | if scmutil.gdinitconfig(ui): |
|
2265 | 2266 | requirements.add('generaldelta') |
|
2266 | 2267 | if ui.configbool('experimental', 'treemanifest'): |
|
2267 | 2268 | requirements.add('treemanifest') |
|
2268 | 2269 | |
|
2269 | 2270 | revlogv2 = ui.config('experimental', 'revlogv2') |
|
2270 | 2271 | if revlogv2 == 'enable-unstable-format-and-corrupt-my-data': |
|
2271 | 2272 | requirements.remove('revlogv1') |
|
2272 | 2273 | # generaldelta is implied by revlogv2. |
|
2273 | 2274 | requirements.discard('generaldelta') |
|
2274 | 2275 | requirements.add(REVLOGV2_REQUIREMENT) |
|
2275 | 2276 | |
|
2276 | 2277 | return requirements |
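A sketch of the extension hook the docstring mentions, assuming the standard extensions.wrapfunction mechanism; the requirement name is hypothetical:

    from mercurial import extensions, localrepo

    def _wrapreqs(orig, repo):
        requirements = orig(repo)
        requirements.add('exp-myfeature')    # hypothetical requirement
        return requirements

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _wrapreqs)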
@@ -1,1418 +1,1418 b'' | |||
|
1 | 1 | # scmutil.py - Mercurial core utility functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import glob |
|
12 | 12 | import hashlib |
|
13 | 13 | import os |
|
14 | 14 | import re |
|
15 | 15 | import socket |
|
16 | 16 | import subprocess |
|
17 | 17 | import weakref |
|
18 | 18 | |
|
19 | 19 | from .i18n import _ |
|
20 | 20 | from .node import ( |
|
21 | 21 | hex, |
|
22 | 22 | nullid, |
|
23 | 23 | short, |
|
24 | 24 | wdirid, |
|
25 | 25 | wdirrev, |
|
26 | 26 | ) |
|
27 | 27 | |
|
28 | 28 | from . import ( |
|
29 | 29 | encoding, |
|
30 | 30 | error, |
|
31 | 31 | match as matchmod, |
|
32 | 32 | obsolete, |
|
33 | 33 | obsutil, |
|
34 | 34 | pathutil, |
|
35 | 35 | phases, |
|
36 | 36 | pycompat, |
|
37 | 37 | revsetlang, |
|
38 | 38 | similar, |
|
39 | 39 | url, |
|
40 | 40 | util, |
|
41 | 41 | vfs, |
|
42 | 42 | ) |
|
43 | 43 | |
|
44 | 44 | if pycompat.iswindows: |
|
45 | 45 | from . import scmwindows as scmplatform |
|
46 | 46 | else: |
|
47 | 47 | from . import scmposix as scmplatform |
|
48 | 48 | |
|
49 | 49 | termsize = scmplatform.termsize |
|
50 | 50 | |
|
51 | 51 | class status(tuple): |
|
52 | 52 | '''Named tuple with a list of files per status. The 'deleted', 'unknown' |
|
53 | 53 | and 'ignored' properties are only relevant to the working copy. |
|
54 | 54 | ''' |
|
55 | 55 | |
|
56 | 56 | __slots__ = () |
|
57 | 57 | |
|
58 | 58 | def __new__(cls, modified, added, removed, deleted, unknown, ignored, |
|
59 | 59 | clean): |
|
60 | 60 | return tuple.__new__(cls, (modified, added, removed, deleted, unknown, |
|
61 | 61 | ignored, clean)) |
|
62 | 62 | |
|
63 | 63 | @property |
|
64 | 64 | def modified(self): |
|
65 | 65 | '''files that have been modified''' |
|
66 | 66 | return self[0] |
|
67 | 67 | |
|
68 | 68 | @property |
|
69 | 69 | def added(self): |
|
70 | 70 | '''files that have been added''' |
|
71 | 71 | return self[1] |
|
72 | 72 | |
|
73 | 73 | @property |
|
74 | 74 | def removed(self): |
|
75 | 75 | '''files that have been removed''' |
|
76 | 76 | return self[2] |
|
77 | 77 | |
|
78 | 78 | @property |
|
79 | 79 | def deleted(self): |
|
80 | 80 | '''files that are in the dirstate, but have been deleted from the |
|
81 | 81 | working copy (aka "missing") |
|
82 | 82 | ''' |
|
83 | 83 | return self[3] |
|
84 | 84 | |
|
85 | 85 | @property |
|
86 | 86 | def unknown(self): |
|
87 | 87 | '''files not in the dirstate that are not ignored''' |
|
88 | 88 | return self[4] |
|
89 | 89 | |
|
90 | 90 | @property |
|
91 | 91 | def ignored(self): |
|
92 | 92 | '''files not in the dirstate that are ignored (by _dirignore())''' |
|
93 | 93 | return self[5] |
|
94 | 94 | |
|
95 | 95 | @property |
|
96 | 96 | def clean(self): |
|
97 | 97 | '''files that have not been modified''' |
|
98 | 98 | return self[6] |
|
99 | 99 | |
|
100 | 100 | def __repr__(self, *args, **kwargs): |
|
101 | 101 | return (('<status modified=%r, added=%r, removed=%r, deleted=%r, ' |
|
102 | 102 | 'unknown=%r, ignored=%r, clean=%r>') % self) |
|
103 | 103 | |
|
104 | 104 | def itersubrepos(ctx1, ctx2): |
|
105 | 105 | """find subrepos in ctx1 or ctx2""" |
|
106 | 106 | # Create a (subpath, ctx) mapping where we prefer subpaths from |
|
107 | 107 | # ctx1. The subpaths from ctx2 are important when the .hgsub file |
|
108 | 108 | # has been modified (in ctx2) but not yet committed (in ctx1). |
|
109 | 109 | subpaths = dict.fromkeys(ctx2.substate, ctx2) |
|
110 | 110 | subpaths.update(dict.fromkeys(ctx1.substate, ctx1)) |
|
111 | 111 | |
|
112 | 112 | missing = set() |
|
113 | 113 | |
|
114 | 114 | for subpath in ctx2.substate: |
|
115 | 115 | if subpath not in ctx1.substate: |
|
116 | 116 | del subpaths[subpath] |
|
117 | 117 | missing.add(subpath) |
|
118 | 118 | |
|
119 | 119 | for subpath, ctx in sorted(subpaths.iteritems()): |
|
120 | 120 | yield subpath, ctx.sub(subpath) |
|
121 | 121 | |
|
122 | 122 | # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way, |
|
123 | 123 | # status and diff will have an accurate result when it does |
|
124 | 124 | # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared |
|
125 | 125 | # against itself. |
|
126 | 126 | for subpath in missing: |
|
127 | 127 | yield subpath, ctx2.nullsub(subpath, ctx1) |
|
128 | 128 | |
|
129 | 129 | def nochangesfound(ui, repo, excluded=None): |
|
130 | 130 | '''Report no changes for push/pull, excluded is None or a list of |
|
131 | 131 | nodes excluded from the push/pull. |
|
132 | 132 | ''' |
|
133 | 133 | secretlist = [] |
|
134 | 134 | if excluded: |
|
135 | 135 | for n in excluded: |
|
136 | 136 | ctx = repo[n] |
|
137 | 137 | if ctx.phase() >= phases.secret and not ctx.extinct(): |
|
138 | 138 | secretlist.append(n) |
|
139 | 139 | |
|
140 | 140 | if secretlist: |
|
141 | 141 | ui.status(_("no changes found (ignored %d secret changesets)\n") |
|
142 | 142 | % len(secretlist)) |
|
143 | 143 | else: |
|
144 | 144 | ui.status(_("no changes found\n")) |
|
145 | 145 | |
|
146 | 146 | def callcatch(ui, func): |
|
147 | 147 | """call func() with global exception handling |
|
148 | 148 | |
|
149 | 149 | return func() if no exception happens. otherwise do some error handling |
|
150 | 150 | and return an exit code accordingly. does not handle all exceptions. |
|
151 | 151 | """ |
|
152 | 152 | try: |
|
153 | 153 | try: |
|
154 | 154 | return func() |
|
155 | 155 | except: # re-raises |
|
156 | 156 | ui.traceback() |
|
157 | 157 | raise |
|
158 | 158 | # Global exception handling, alphabetically |
|
159 | 159 | # Mercurial-specific first, followed by built-in and library exceptions |
|
160 | 160 | except error.LockHeld as inst: |
|
161 | 161 | if inst.errno == errno.ETIMEDOUT: |
|
162 | 162 | reason = _('timed out waiting for lock held by %r') % inst.locker |
|
163 | 163 | else: |
|
164 | 164 | reason = _('lock held by %r') % inst.locker |
|
165 | 165 | ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason)) |
|
166 | 166 | if not inst.locker: |
|
167 | 167 | ui.warn(_("(lock might be very busy)\n")) |
|
168 | 168 | except error.LockUnavailable as inst: |
|
169 | 169 | ui.warn(_("abort: could not lock %s: %s\n") % |
|
170 | 170 | (inst.desc or inst.filename, |
|
171 | 171 | encoding.strtolocal(inst.strerror))) |
|
172 | 172 | except error.OutOfBandError as inst: |
|
173 | 173 | if inst.args: |
|
174 | 174 | msg = _("abort: remote error:\n") |
|
175 | 175 | else: |
|
176 | 176 | msg = _("abort: remote error\n") |
|
177 | 177 | ui.warn(msg) |
|
178 | 178 | if inst.args: |
|
179 | 179 | ui.warn(''.join(inst.args)) |
|
180 | 180 | if inst.hint: |
|
181 | 181 | ui.warn('(%s)\n' % inst.hint) |
|
182 | 182 | except error.RepoError as inst: |
|
183 | 183 | ui.warn(_("abort: %s!\n") % inst) |
|
184 | 184 | if inst.hint: |
|
185 | 185 | ui.warn(_("(%s)\n") % inst.hint) |
|
186 | 186 | except error.ResponseError as inst: |
|
187 | 187 | ui.warn(_("abort: %s") % inst.args[0]) |
|
188 | 188 | if not isinstance(inst.args[1], basestring): |
|
189 | 189 | ui.warn(" %r\n" % (inst.args[1],)) |
|
190 | 190 | elif not inst.args[1]: |
|
191 | 191 | ui.warn(_(" empty string\n")) |
|
192 | 192 | else: |
|
193 | 193 | ui.warn("\n%r\n" % util.ellipsis(inst.args[1])) |
|
194 | 194 | except error.CensoredNodeError as inst: |
|
195 | 195 | ui.warn(_("abort: file censored %s!\n") % inst) |
|
196 | 196 | except error.RevlogError as inst: |
|
197 | 197 | ui.warn(_("abort: %s!\n") % inst) |
|
198 | 198 | except error.InterventionRequired as inst: |
|
199 | 199 | ui.warn("%s\n" % inst) |
|
200 | 200 | if inst.hint: |
|
201 | 201 | ui.warn(_("(%s)\n") % inst.hint) |
|
202 | 202 | return 1 |
|
203 | 203 | except error.WdirUnsupported: |
|
204 | 204 | ui.warn(_("abort: working directory revision cannot be specified\n")) |
|
205 | 205 | except error.Abort as inst: |
|
206 | 206 | ui.warn(_("abort: %s\n") % inst) |
|
207 | 207 | if inst.hint: |
|
208 | 208 | ui.warn(_("(%s)\n") % inst.hint) |
|
209 | 209 | except ImportError as inst: |
|
210 | 210 | ui.warn(_("abort: %s!\n") % inst) |
|
211 |     | m = str(inst).split()[-1]

    | 211 | m = util.forcebytestr(inst).split()[-1]
|
212 | 212 | if m in "mpatch bdiff".split(): |
|
213 | 213 | ui.warn(_("(did you forget to compile extensions?)\n")) |
|
214 | 214 | elif m in "zlib".split(): |
|
215 | 215 | ui.warn(_("(is your Python install correct?)\n")) |
|
216 | 216 | except IOError as inst: |
|
217 | 217 | if util.safehasattr(inst, "code"): |
|
218 | 218 | ui.warn(_("abort: %s\n") % util.forcebytestr(inst)) |
|
219 | 219 | elif util.safehasattr(inst, "reason"): |
|
220 | 220 | try: # usually it is in the form (errno, strerror) |
|
221 | 221 | reason = inst.reason.args[1] |
|
222 | 222 | except (AttributeError, IndexError): |
|
223 | 223 | # it might be anything, for example a string |
|
224 | 224 | reason = inst.reason |
|
225 | 225 | if isinstance(reason, unicode): |
|
226 | 226 | # SSLError of Python 2.7.9 contains a unicode |
|
227 | 227 | reason = encoding.unitolocal(reason) |
|
228 | 228 | ui.warn(_("abort: error: %s\n") % reason) |
|
229 | 229 | elif (util.safehasattr(inst, "args") |
|
230 | 230 | and inst.args and inst.args[0] == errno.EPIPE): |
|
231 | 231 | pass |
|
232 | 232 | elif getattr(inst, "strerror", None): |
|
233 | 233 | if getattr(inst, "filename", None): |
|
234 | 234 | ui.warn(_("abort: %s: %s\n") % ( |
|
235 | 235 | encoding.strtolocal(inst.strerror), inst.filename)) |
|
236 | 236 | else: |
|
237 | 237 | ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) |
|
238 | 238 | else: |
|
239 | 239 | raise |
|
240 | 240 | except OSError as inst: |
|
241 | 241 | if getattr(inst, "filename", None) is not None: |
|
242 | 242 | ui.warn(_("abort: %s: '%s'\n") % ( |
|
243 | 243 | encoding.strtolocal(inst.strerror), inst.filename)) |
|
244 | 244 | else: |
|
245 | 245 | ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) |
|
246 | 246 | except MemoryError: |
|
247 | 247 | ui.warn(_("abort: out of memory\n")) |
|
248 | 248 | except SystemExit as inst: |
|
249 | 249 | # Commands shouldn't sys.exit directly, but give a return code. |
|
250 | 250 | # Just in case, catch this and pass the exit code to the caller.
|
251 | 251 | return inst.code |
|
252 | 252 | except socket.error as inst: |
|
253 | 253 | ui.warn(_("abort: %s\n") % inst.args[-1]) |
|
254 | 254 | |
|
255 | 255 | return -1 |
|
256 | 256 | |
|
257 | 257 | def checknewlabel(repo, lbl, kind): |
|
258 | 258 | # Do not use the "kind" parameter in ui output. |
|
259 | 259 | # It makes strings difficult to translate. |
|
260 | 260 | if lbl in ['tip', '.', 'null']: |
|
261 | 261 | raise error.Abort(_("the name '%s' is reserved") % lbl) |
|
262 | 262 | for c in (':', '\0', '\n', '\r'): |
|
263 | 263 | if c in lbl: |
|
264 | 264 | raise error.Abort(_("%r cannot be used in a name") % c) |
|
265 | 265 | try: |
|
266 | 266 | int(lbl) |
|
267 | 267 | raise error.Abort(_("cannot use an integer as a name")) |
|
268 | 268 | except ValueError: |
|
269 | 269 | pass |
|
270 | 270 | if lbl.strip() != lbl: |
|
271 | 271 | raise error.Abort(_("leading or trailing whitespace in name %r") % lbl) |
|
272 | 272 | |
|
273 | 273 | def checkfilename(f): |
|
274 | 274 | '''Check that the filename f is an acceptable filename for a tracked file''' |
|
275 | 275 | if '\r' in f or '\n' in f: |
|
276 | 276 | raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f) |
|
277 | 277 | |
|
278 | 278 | def checkportable(ui, f): |
|
279 | 279 | '''Check if filename f is portable and warn or abort depending on config''' |
|
280 | 280 | checkfilename(f) |
|
281 | 281 | abort, warn = checkportabilityalert(ui) |
|
282 | 282 | if abort or warn: |
|
283 | 283 | msg = util.checkwinfilename(f) |
|
284 | 284 | if msg: |
|
285 | 285 | msg = "%s: %s" % (msg, util.shellquote(f)) |
|
286 | 286 | if abort: |
|
287 | 287 | raise error.Abort(msg) |
|
288 | 288 | ui.warn(_("warning: %s\n") % msg) |
|
289 | 289 | |
|
290 | 290 | def checkportabilityalert(ui): |
|
291 | 291 | '''check if the user's config requests nothing, a warning, or abort for |
|
292 | 292 | non-portable filenames''' |
|
293 | 293 | val = ui.config('ui', 'portablefilenames') |
|
294 | 294 | lval = val.lower() |
|
295 | 295 | bval = util.parsebool(val) |
|
296 | 296 | abort = pycompat.iswindows or lval == 'abort' |
|
297 | 297 | warn = bval or lval == 'warn' |
|
298 | 298 | if bval is None and not (warn or abort or lval == 'ignore'): |
|
299 | 299 | raise error.ConfigError( |
|
300 | 300 | _("ui.portablefilenames value is invalid ('%s')") % val) |
|
301 | 301 | return abort, warn |
|
302 | 302 | |
|
303 | 303 | class casecollisionauditor(object): |
|
304 | 304 | def __init__(self, ui, abort, dirstate): |
|
305 | 305 | self._ui = ui |
|
306 | 306 | self._abort = abort |
|
307 | 307 | allfiles = '\0'.join(dirstate._map) |
|
308 | 308 | self._loweredfiles = set(encoding.lower(allfiles).split('\0')) |
|
309 | 309 | self._dirstate = dirstate |
|
310 | 310 | # The purpose of _newfiles is so that we don't complain about |
|
311 | 311 | # case collisions if someone were to call this object with the |
|
312 | 312 | # same filename twice. |
|
313 | 313 | self._newfiles = set() |
|
314 | 314 | |
|
315 | 315 | def __call__(self, f): |
|
316 | 316 | if f in self._newfiles: |
|
317 | 317 | return |
|
318 | 318 | fl = encoding.lower(f) |
|
319 | 319 | if fl in self._loweredfiles and f not in self._dirstate: |
|
320 | 320 | msg = _('possible case-folding collision for %s') % f |
|
321 | 321 | if self._abort: |
|
322 | 322 | raise error.Abort(msg) |
|
323 | 323 | self._ui.warn(_("warning: %s\n") % msg) |
|
324 | 324 | self._loweredfiles.add(fl) |
|
325 | 325 | self._newfiles.add(f) |
|
326 | 326 | |
|
327 | 327 | def filteredhash(repo, maxrev): |
|
328 | 328 | """build hash of filtered revisions in the current repoview. |
|
329 | 329 | |
|
330 | 330 | Multiple caches perform up-to-date validation by checking that the |
|
331 | 331 | tiprev and tipnode stored in the cache file match the current repository. |
|
332 | 332 | However, this is not sufficient for validating repoviews because the set |
|
333 | 333 | of revisions in the view may change without the repository tiprev and |
|
334 | 334 | tipnode changing. |
|
335 | 335 | |
|
336 | 336 | This function hashes all the revs filtered from the view and returns |
|
337 | 337 | that SHA-1 digest. |
|
338 | 338 | """ |
|
339 | 339 | cl = repo.changelog |
|
340 | 340 | if not cl.filteredrevs: |
|
341 | 341 | return None |
|
342 | 342 | key = None |
|
343 | 343 | revs = sorted(r for r in cl.filteredrevs if r <= maxrev) |
|
344 | 344 | if revs: |
|
345 | 345 | s = hashlib.sha1() |
|
346 | 346 | for rev in revs: |
|
347 | 347 | s.update('%d;' % rev) |
|
348 | 348 | key = s.digest() |
|
349 | 349 | return key |
|
350 | 350 | |
|
351 | 351 | def walkrepos(path, followsym=False, seen_dirs=None, recurse=False): |
|
352 | 352 | '''yield every hg repository under path, always recursively. |
|
353 | 353 | The recurse flag will only control recursion into repo working dirs''' |
|
354 | 354 | def errhandler(err): |
|
355 | 355 | if err.filename == path: |
|
356 | 356 | raise err |
|
357 | 357 | samestat = getattr(os.path, 'samestat', None) |
|
358 | 358 | if followsym and samestat is not None: |
|
359 | 359 | def adddir(dirlst, dirname): |
|
360 | 360 | dirstat = os.stat(dirname) |
|
361 | 361 | match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst) |
|
362 | 362 | if not match: |
|
363 | 363 | dirlst.append(dirstat) |
|
364 | 364 | return not match |
|
365 | 365 | else: |
|
366 | 366 | followsym = False |
|
367 | 367 | |
|
368 | 368 | if (seen_dirs is None) and followsym: |
|
369 | 369 | seen_dirs = [] |
|
370 | 370 | adddir(seen_dirs, path) |
|
371 | 371 | for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler): |
|
372 | 372 | dirs.sort() |
|
373 | 373 | if '.hg' in dirs: |
|
374 | 374 | yield root # found a repository |
|
375 | 375 | qroot = os.path.join(root, '.hg', 'patches') |
|
376 | 376 | if os.path.isdir(os.path.join(qroot, '.hg')): |
|
377 | 377 | yield qroot # we have a patch queue repo here |
|
378 | 378 | if recurse: |
|
379 | 379 | # avoid recursing inside the .hg directory |
|
380 | 380 | dirs.remove('.hg') |
|
381 | 381 | else: |
|
382 | 382 | dirs[:] = [] # don't descend further |
|
383 | 383 | elif followsym: |
|
384 | 384 | newdirs = [] |
|
385 | 385 | for d in dirs: |
|
386 | 386 | fname = os.path.join(root, d) |
|
387 | 387 | if adddir(seen_dirs, fname): |
|
388 | 388 | if os.path.islink(fname): |
|
389 | 389 | for hgname in walkrepos(fname, True, seen_dirs): |
|
390 | 390 | yield hgname |
|
391 | 391 | else: |
|
392 | 392 | newdirs.append(d) |
|
393 | 393 | dirs[:] = newdirs |
|
394 | 394 | |
|
395 | 395 | def binnode(ctx): |
|
396 | 396 | """Return binary node id for a given basectx""" |
|
397 | 397 | node = ctx.node() |
|
398 | 398 | if node is None: |
|
399 | 399 | return wdirid |
|
400 | 400 | return node |
|
401 | 401 | |
|
402 | 402 | def intrev(ctx): |
|
403 | 403 | """Return integer for a given basectx that can be used in comparison or |
|
404 | 404 | arithmetic operation""" |
|
405 | 405 | rev = ctx.rev() |
|
406 | 406 | if rev is None: |
|
407 | 407 | return wdirrev |
|
408 | 408 | return rev |
|
409 | 409 | |
|
410 | 410 | def formatchangeid(ctx): |
|
411 | 411 | """Format changectx as '{rev}:{node|formatnode}', which is the default |
|
412 | 412 | template provided by logcmdutil.changesettemplater""" |
|
413 | 413 | repo = ctx.repo() |
|
414 | 414 | return formatrevnode(repo.ui, intrev(ctx), binnode(ctx)) |
|
415 | 415 | |
|
416 | 416 | def formatrevnode(ui, rev, node): |
|
417 | 417 | """Format given revision and node depending on the current verbosity""" |
|
418 | 418 | if ui.debugflag: |
|
419 | 419 | hexfunc = hex |
|
420 | 420 | else: |
|
421 | 421 | hexfunc = short |
|
422 | 422 | return '%d:%s' % (rev, hexfunc(node)) |
|
423 | 423 | |
|
424 | 424 | def revsingle(repo, revspec, default='.', localalias=None): |
|
425 | 425 | if not revspec and revspec != 0: |
|
426 | 426 | return repo[default] |
|
427 | 427 | |
|
428 | 428 | l = revrange(repo, [revspec], localalias=localalias) |
|
429 | 429 | if not l: |
|
430 | 430 | raise error.Abort(_('empty revision set')) |
|
431 | 431 | return repo[l.last()] |
|
432 | 432 | |
|
433 | 433 | def _pairspec(revspec): |
|
434 | 434 | tree = revsetlang.parse(revspec) |
|
435 | 435 | return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall') |
|
436 | 436 | |
|
437 | 437 | def revpair(repo, revs): |
|
438 | 438 | if not revs: |
|
439 | 439 | return repo.dirstate.p1(), None |
|
440 | 440 | |
|
441 | 441 | l = revrange(repo, revs) |
|
442 | 442 | |
|
443 | 443 | if not l: |
|
444 | 444 | first = second = None |
|
445 | 445 | elif l.isascending(): |
|
446 | 446 | first = l.min() |
|
447 | 447 | second = l.max() |
|
448 | 448 | elif l.isdescending(): |
|
449 | 449 | first = l.max() |
|
450 | 450 | second = l.min() |
|
451 | 451 | else: |
|
452 | 452 | first = l.first() |
|
453 | 453 | second = l.last() |
|
454 | 454 | |
|
455 | 455 | if first is None: |
|
456 | 456 | raise error.Abort(_('empty revision range')) |
|
457 | 457 | if (first == second and len(revs) >= 2 |
|
458 | 458 | and not all(revrange(repo, [r]) for r in revs)): |
|
459 | 459 | raise error.Abort(_('empty revision on one side of range')) |
|
460 | 460 | |
|
461 | 461 | # if top-level is range expression, the result must always be a pair |
|
462 | 462 | if first == second and len(revs) == 1 and not _pairspec(revs[0]): |
|
463 | 463 | return repo.lookup(first), None |
|
464 | 464 | |
|
465 | 465 | return repo.lookup(first), repo.lookup(second) |
|
466 | 466 | |
|
467 | 467 | def revrange(repo, specs, localalias=None): |
|
468 | 468 | """Execute 1 to many revsets and return the union. |
|
469 | 469 | |
|
470 | 470 | This is the preferred mechanism for executing revsets using user-specified |
|
471 | 471 | config options, such as revset aliases. |
|
472 | 472 | |
|
473 | 473 | The revsets specified by ``specs`` will be executed via a chained ``OR`` |
|
474 | 474 | expression. If ``specs`` is empty, an empty result is returned. |
|
475 | 475 | |
|
476 | 476 | ``specs`` can contain integers, in which case they are assumed to be |
|
477 | 477 | revision numbers. |
|
478 | 478 | |
|
479 | 479 | It is assumed the revsets are already formatted. If you have arguments |
|
480 | 480 | that need to be expanded in the revset, call ``revsetlang.formatspec()`` |
|
481 | 481 | and pass the result as an element of ``specs``. |
|
482 | 482 | |
|
483 | 483 | Specifying a single revset is allowed. |
|
484 | 484 | |
|
485 | 485 | Returns a ``revset.abstractsmartset`` which is a list-like interface over |
|
486 | 486 | integer revisions. |
|
487 | 487 | """ |
|
488 | 488 | allspecs = [] |
|
489 | 489 | for spec in specs: |
|
490 | 490 | if isinstance(spec, int): |
|
491 | 491 | spec = revsetlang.formatspec('rev(%d)', spec) |
|
492 | 492 | allspecs.append(spec) |
|
493 | 493 | return repo.anyrevs(allspecs, user=True, localalias=localalias) |
|
494 | 494 | |
|
495 | 495 | def meaningfulparents(repo, ctx): |
|
496 | 496 | """Return list of meaningful (or all if debug) parentrevs for rev. |
|
497 | 497 | |
|
498 | 498 | For merges (two non-nullrev revisions) both parents are meaningful. |
|
499 | 499 | Otherwise the first parent revision is considered meaningful if it |
|
500 | 500 | is not the preceding revision. |
|
501 | 501 | """ |
|
502 | 502 | parents = ctx.parents() |
|
503 | 503 | if len(parents) > 1: |
|
504 | 504 | return parents |
|
505 | 505 | if repo.ui.debugflag: |
|
506 | 506 | return [parents[0], repo['null']] |
|
507 | 507 | if parents[0].rev() >= intrev(ctx) - 1: |
|
508 | 508 | return [] |
|
509 | 509 | return parents |
|
510 | 510 | |
|
511 | 511 | def expandpats(pats): |
|
512 | 512 | '''Expand bare globs when running on windows. |
|
513 | 513 | On posix we assume it already has already been done by sh.''' |
|
514 | 514 | if not util.expandglobs: |
|
515 | 515 | return list(pats) |
|
516 | 516 | ret = [] |
|
517 | 517 | for kindpat in pats: |
|
518 | 518 | kind, pat = matchmod._patsplit(kindpat, None) |
|
519 | 519 | if kind is None: |
|
520 | 520 | try: |
|
521 | 521 | globbed = glob.glob(pat) |
|
522 | 522 | except re.error: |
|
523 | 523 | globbed = [pat] |
|
524 | 524 | if globbed: |
|
525 | 525 | ret.extend(globbed) |
|
526 | 526 | continue |
|
527 | 527 | ret.append(kindpat) |
|
528 | 528 | return ret |
|
529 | 529 | |
|
530 | 530 | def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath', |
|
531 | 531 | badfn=None): |
|
532 | 532 | '''Return a matcher and the patterns that were used. |
|
533 | 533 | The matcher will warn about bad matches, unless an alternate badfn callback |
|
534 | 534 | is provided.''' |
|
535 | 535 | if pats == ("",): |
|
536 | 536 | pats = [] |
|
537 | 537 | if opts is None: |
|
538 | 538 | opts = {} |
|
539 | 539 | if not globbed and default == 'relpath': |
|
540 | 540 | pats = expandpats(pats or []) |
|
541 | 541 | |
|
542 | 542 | def bad(f, msg): |
|
543 | 543 | ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg)) |
|
544 | 544 | |
|
545 | 545 | if badfn is None: |
|
546 | 546 | badfn = bad |
|
547 | 547 | |
|
548 | 548 | m = ctx.match(pats, opts.get('include'), opts.get('exclude'), |
|
549 | 549 | default, listsubrepos=opts.get('subrepos'), badfn=badfn) |
|
550 | 550 | |
|
551 | 551 | if m.always(): |
|
552 | 552 | pats = [] |
|
553 | 553 | return m, pats |
|
554 | 554 | |
|
555 | 555 | def match(ctx, pats=(), opts=None, globbed=False, default='relpath', |
|
556 | 556 | badfn=None): |
|
557 | 557 | '''Return a matcher that will warn about bad matches.''' |
|
558 | 558 | return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0] |
|
559 | 559 | |
|
560 | 560 | def matchall(repo): |
|
561 | 561 | '''Return a matcher that will efficiently match everything.''' |
|
562 | 562 | return matchmod.always(repo.root, repo.getcwd()) |
|
563 | 563 | |
|
564 | 564 | def matchfiles(repo, files, badfn=None): |
|
565 | 565 | '''Return a matcher that will efficiently match exactly these files.''' |
|
566 | 566 | return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn) |
|
567 | 567 | |
|
568 | 568 | def parsefollowlinespattern(repo, rev, pat, msg): |
|
569 | 569 | """Return a file name from `pat` pattern suitable for usage in followlines |
|
570 | 570 | logic. |
|
571 | 571 | """ |
|
572 | 572 | if not matchmod.patkind(pat): |
|
573 | 573 | return pathutil.canonpath(repo.root, repo.getcwd(), pat) |
|
574 | 574 | else: |
|
575 | 575 | ctx = repo[rev] |
|
576 | 576 | m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx) |
|
577 | 577 | files = [f for f in ctx if m(f)] |
|
578 | 578 | if len(files) != 1: |
|
579 | 579 | raise error.ParseError(msg) |
|
580 | 580 | return files[0] |
|
581 | 581 | |
|
582 | 582 | def origpath(ui, repo, filepath): |
|
583 | 583 | '''customize where .orig files are created |
|
584 | 584 | |
|
585 | 585 | Fetch user defined path from config file: [ui] origbackuppath = <path> |
|
586 | 586 | Fall back to default (filepath with .orig suffix) if not specified |
|
587 | 587 | ''' |
|
588 | 588 | origbackuppath = ui.config('ui', 'origbackuppath') |
|
589 | 589 | if not origbackuppath: |
|
590 | 590 | return filepath + ".orig" |
|
591 | 591 | |
|
592 | 592 | # Convert filepath from an absolute path into a path inside the repo. |
|
593 | 593 | filepathfromroot = util.normpath(os.path.relpath(filepath, |
|
594 | 594 | start=repo.root)) |
|
595 | 595 | |
|
596 | 596 | origvfs = vfs.vfs(repo.wjoin(origbackuppath)) |
|
597 | 597 | origbackupdir = origvfs.dirname(filepathfromroot) |
|
598 | 598 | if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir): |
|
599 | 599 | ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir)) |
|
600 | 600 | |
|
601 | 601 | # Remove any files that conflict with the backup file's path |
|
602 | 602 | for f in reversed(list(util.finddirs(filepathfromroot))): |
|
603 | 603 | if origvfs.isfileorlink(f): |
|
604 | 604 | ui.note(_('removing conflicting file: %s\n') |
|
605 | 605 | % origvfs.join(f)) |
|
606 | 606 | origvfs.unlink(f) |
|
607 | 607 | break |
|
608 | 608 | |
|
609 | 609 | origvfs.makedirs(origbackupdir) |
|
610 | 610 | |
|
611 | 611 | if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot): |
|
612 | 612 | ui.note(_('removing conflicting directory: %s\n') |
|
613 | 613 | % origvfs.join(filepathfromroot)) |
|
614 | 614 | origvfs.rmtree(filepathfromroot, forcibly=True) |
|
615 | 615 | |
|
616 | 616 | return origvfs.join(filepathfromroot) |
|
617 | 617 | |
|
618 | 618 | class _containsnode(object): |
|
619 | 619 | """proxy __contains__(node) to container.__contains__ which accepts revs""" |
|
620 | 620 | |
|
621 | 621 | def __init__(self, repo, revcontainer): |
|
622 | 622 | self._torev = repo.changelog.rev |
|
623 | 623 | self._revcontains = revcontainer.__contains__ |
|
624 | 624 | |
|
625 | 625 | def __contains__(self, node): |
|
626 | 626 | return self._revcontains(self._torev(node)) |
|
627 | 627 | |
|
628 | 628 | def cleanupnodes(repo, replacements, operation, moves=None, metadata=None): |
|
629 | 629 | """do common cleanups when old nodes are replaced by new nodes |
|
630 | 630 | |
|
631 | 631 | That includes writing obsmarkers or stripping nodes, and moving bookmarks. |
|
632 | 632 | (we might also want to move working directory parent in the future) |
|
633 | 633 | |
|
634 | 634 | By default, bookmark moves are calculated automatically from 'replacements', |
|
635 | 635 | but 'moves' can be used to override that. Also, 'moves' may include |
|
636 | 636 | additional bookmark moves that should not have associated obsmarkers. |
|
637 | 637 | |
|
638 | 638 | replacements is {oldnode: [newnode]} or a iterable of nodes if they do not |
|
639 | 639 | have replacements. operation is a string, like "rebase". |
|
640 | 640 | |
|
641 | 641 | metadata is dictionary containing metadata to be stored in obsmarker if |
|
642 | 642 | obsolescence is enabled. |
|
643 | 643 | """ |
|
644 | 644 | if not replacements and not moves: |
|
645 | 645 | return |
|
646 | 646 | |
|
647 | 647 | # translate mapping's other forms |
|
648 | 648 | if not util.safehasattr(replacements, 'items'): |
|
649 | 649 | replacements = {n: () for n in replacements} |
|
650 | 650 | |
|
651 | 651 | # Calculate bookmark movements |
|
652 | 652 | if moves is None: |
|
653 | 653 | moves = {} |
|
654 | 654 | # Unfiltered repo is needed since nodes in replacements might be hidden. |
|
655 | 655 | unfi = repo.unfiltered() |
|
656 | 656 | for oldnode, newnodes in replacements.items(): |
|
657 | 657 | if oldnode in moves: |
|
658 | 658 | continue |
|
659 | 659 | if len(newnodes) > 1: |
|
660 | 660 | # usually a split, take the one with biggest rev number |
|
661 | 661 | newnode = next(unfi.set('max(%ln)', newnodes)).node() |
|
662 | 662 | elif len(newnodes) == 0: |
|
663 | 663 | # move bookmark backwards |
|
664 | 664 | roots = list(unfi.set('max((::%n) - %ln)', oldnode, |
|
665 | 665 | list(replacements))) |
|
666 | 666 | if roots: |
|
667 | 667 | newnode = roots[0].node() |
|
668 | 668 | else: |
|
669 | 669 | newnode = nullid |
|
670 | 670 | else: |
|
671 | 671 | newnode = newnodes[0] |
|
672 | 672 | moves[oldnode] = newnode |
|
673 | 673 | |
|
674 | 674 | with repo.transaction('cleanup') as tr: |
|
675 | 675 | # Move bookmarks |
|
676 | 676 | bmarks = repo._bookmarks |
|
677 | 677 | bmarkchanges = [] |
|
678 | 678 | allnewnodes = [n for ns in replacements.values() for n in ns] |
|
679 | 679 | for oldnode, newnode in moves.items(): |
|
680 | 680 | oldbmarks = repo.nodebookmarks(oldnode) |
|
681 | 681 | if not oldbmarks: |
|
682 | 682 | continue |
|
683 | 683 | from . import bookmarks # avoid import cycle |
|
684 | 684 | repo.ui.debug('moving bookmarks %r from %s to %s\n' % |
|
685 | 685 | (oldbmarks, hex(oldnode), hex(newnode))) |
|
686 | 686 | # Delete divergent bookmarks being parents of related newnodes |
|
687 | 687 | deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)', |
|
688 | 688 | allnewnodes, newnode, oldnode) |
|
689 | 689 | deletenodes = _containsnode(repo, deleterevs) |
|
690 | 690 | for name in oldbmarks: |
|
691 | 691 | bmarkchanges.append((name, newnode)) |
|
692 | 692 | for b in bookmarks.divergent2delete(repo, deletenodes, name): |
|
693 | 693 | bmarkchanges.append((b, None)) |
|
694 | 694 | |
|
695 | 695 | if bmarkchanges: |
|
696 | 696 | bmarks.applychanges(repo, tr, bmarkchanges) |
|
697 | 697 | |
|
698 | 698 | # Obsolete or strip nodes |
|
699 | 699 | if obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
700 | 700 | # If a node is already obsoleted, and we want to obsolete it |
|
701 | 701 | # without a successor, skip that obsolete request since it's

702 | 702 | # unnecessary. That's the "if s or not isobs(n)" check below.

703 | 703 | # Also sort the nodes in topological order; that might be useful for
|
704 | 704 | # some obsstore logic. |
|
705 | 705 | # NOTE: the filtering and sorting might belong to createmarkers. |
|
706 | 706 | isobs = unfi.obsstore.successors.__contains__ |
|
707 | 707 | torev = unfi.changelog.rev |
|
708 | 708 | sortfunc = lambda ns: torev(ns[0]) |
|
709 | 709 | rels = [(unfi[n], tuple(unfi[m] for m in s)) |
|
710 | 710 | for n, s in sorted(replacements.items(), key=sortfunc) |
|
711 | 711 | if s or not isobs(n)] |
|
712 | 712 | if rels: |
|
713 | 713 | obsolete.createmarkers(repo, rels, operation=operation, |
|
714 | 714 | metadata=metadata) |
|
715 | 715 | else: |
|
716 | 716 | from . import repair # avoid import cycle |
|
717 | 717 | tostrip = list(replacements) |
|
718 | 718 | if tostrip: |
|
719 | 719 | repair.delayedstrip(repo.ui, repo, tostrip, operation) |
|
720 | 720 | |
|
721 | 721 | def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None): |
|
722 | 722 | if opts is None: |
|
723 | 723 | opts = {} |
|
724 | 724 | m = matcher |
|
725 | 725 | if dry_run is None: |
|
726 | 726 | dry_run = opts.get('dry_run') |
|
727 | 727 | if similarity is None: |
|
728 | 728 | similarity = float(opts.get('similarity') or 0) |
|
729 | 729 | |
|
730 | 730 | ret = 0 |
|
731 | 731 | join = lambda f: os.path.join(prefix, f) |
|
732 | 732 | |
|
733 | 733 | wctx = repo[None] |
|
734 | 734 | for subpath in sorted(wctx.substate): |
|
735 | 735 | submatch = matchmod.subdirmatcher(subpath, m) |
|
736 | 736 | if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()): |
|
737 | 737 | sub = wctx.sub(subpath) |
|
738 | 738 | try: |
|
739 | 739 | if sub.addremove(submatch, prefix, opts, dry_run, similarity): |
|
740 | 740 | ret = 1 |
|
741 | 741 | except error.LookupError: |
|
742 | 742 | repo.ui.status(_("skipping missing subrepository: %s\n") |
|
743 | 743 | % join(subpath)) |
|
744 | 744 | |
|
745 | 745 | rejected = [] |
|
746 | 746 | def badfn(f, msg): |
|
747 | 747 | if f in m.files(): |
|
748 | 748 | m.bad(f, msg) |
|
749 | 749 | rejected.append(f) |
|
750 | 750 | |
|
751 | 751 | badmatch = matchmod.badmatch(m, badfn) |
|
752 | 752 | added, unknown, deleted, removed, forgotten = _interestingfiles(repo, |
|
753 | 753 | badmatch) |
|
754 | 754 | |
|
755 | 755 | unknownset = set(unknown + forgotten) |
|
756 | 756 | toprint = unknownset.copy() |
|
757 | 757 | toprint.update(deleted) |
|
758 | 758 | for abs in sorted(toprint): |
|
759 | 759 | if repo.ui.verbose or not m.exact(abs): |
|
760 | 760 | if abs in unknownset: |
|
761 | 761 | status = _('adding %s\n') % m.uipath(abs) |
|
762 | 762 | else: |
|
763 | 763 | status = _('removing %s\n') % m.uipath(abs) |
|
764 | 764 | repo.ui.status(status) |
|
765 | 765 | |
|
766 | 766 | renames = _findrenames(repo, m, added + unknown, removed + deleted, |
|
767 | 767 | similarity) |
|
768 | 768 | |
|
769 | 769 | if not dry_run: |
|
770 | 770 | _markchanges(repo, unknown + forgotten, deleted, renames) |
|
771 | 771 | |
|
772 | 772 | for f in rejected: |
|
773 | 773 | if f in m.files(): |
|
774 | 774 | return 1 |
|
775 | 775 | return ret |
|
776 | 776 | |
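A short usage sketch, modeled on what the addremove command does (the matcher construction assumes the match module API of this Mercurial line):

    from mercurial import match as matchmod

    m = matchmod.always(repo.root, repo.getcwd())  # match every path
    # prefix='' since we operate from the repo root; similarity=0.5
    # turns on rename detection between removed and added files
    ret = addremove(repo, m, '', opts={}, dry_run=False, similarity=0.5)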
|
777 | 777 | def marktouched(repo, files, similarity=0.0): |
|
778 | 778 | '''Assert that files have somehow been operated upon. files are relative to |
|
779 | 779 | the repo root.''' |
|
780 | 780 | m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x)) |
|
781 | 781 | rejected = [] |
|
782 | 782 | |
|
783 | 783 | added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m) |
|
784 | 784 | |
|
785 | 785 | if repo.ui.verbose: |
|
786 | 786 | unknownset = set(unknown + forgotten) |
|
787 | 787 | toprint = unknownset.copy() |
|
788 | 788 | toprint.update(deleted) |
|
789 | 789 | for abs in sorted(toprint): |
|
790 | 790 | if abs in unknownset: |
|
791 | 791 | status = _('adding %s\n') % abs |
|
792 | 792 | else: |
|
793 | 793 | status = _('removing %s\n') % abs |
|
794 | 794 | repo.ui.status(status) |
|
795 | 795 | |
|
796 | 796 | renames = _findrenames(repo, m, added + unknown, removed + deleted, |
|
797 | 797 | similarity) |
|
798 | 798 | |
|
799 | 799 | _markchanges(repo, unknown + forgotten, deleted, renames) |
|
800 | 800 | |
|
801 | 801 | for f in rejected: |
|
802 | 802 | if f in m.files(): |
|
803 | 803 | return 1 |
|
804 | 804 | return 0 |
|
805 | 805 | |
|
806 | 806 | def _interestingfiles(repo, matcher): |
|
807 | 807 | '''Walk dirstate with matcher, looking for files that addremove would care |
|
808 | 808 | about. |
|
809 | 809 | |
|
810 | 810 | This is different from dirstate.status because it doesn't care about |
|
811 | 811 | whether files are modified or clean.''' |
|
812 | 812 | added, unknown, deleted, removed, forgotten = [], [], [], [], [] |
|
813 | 813 | audit_path = pathutil.pathauditor(repo.root, cached=True) |
|
814 | 814 | |
|
815 | 815 | ctx = repo[None] |
|
816 | 816 | dirstate = repo.dirstate |
|
817 | 817 | walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate), |
|
818 | 818 | unknown=True, ignored=False, full=False) |
|
819 | 819 | for abs, st in walkresults.iteritems(): |
|
820 | 820 | dstate = dirstate[abs] |
|
821 | 821 | if dstate == '?' and audit_path.check(abs): |
|
822 | 822 | unknown.append(abs) |
|
823 | 823 | elif dstate != 'r' and not st: |
|
824 | 824 | deleted.append(abs) |
|
825 | 825 | elif dstate == 'r' and st: |
|
826 | 826 | forgotten.append(abs) |
|
827 | 827 | # for finding renames |
|
828 | 828 | elif dstate == 'r' and not st: |
|
829 | 829 | removed.append(abs) |
|
830 | 830 | elif dstate == 'a': |
|
831 | 831 | added.append(abs) |
|
832 | 832 | |
|
833 | 833 | return added, unknown, deleted, removed, forgotten |
|
834 | 834 | |
|
835 | 835 | def _findrenames(repo, matcher, added, removed, similarity): |
|
836 | 836 | '''Find renames from removed files to added ones.''' |
|
837 | 837 | renames = {} |
|
838 | 838 | if similarity > 0: |
|
839 | 839 | for old, new, score in similar.findrenames(repo, added, removed, |
|
840 | 840 | similarity): |
|
841 | 841 | if (repo.ui.verbose or not matcher.exact(old) |
|
842 | 842 | or not matcher.exact(new)): |
|
843 | 843 | repo.ui.status(_('recording removal of %s as rename to %s ' |
|
844 | 844 | '(%d%% similar)\n') % |
|
845 | 845 | (matcher.rel(old), matcher.rel(new), |
|
846 | 846 | score * 100)) |
|
847 | 847 | renames[new] = old |
|
848 | 848 | return renames |
|
849 | 849 | |
|
850 | 850 | def _markchanges(repo, unknown, deleted, renames): |
|
851 | 851 | '''Marks the files in unknown as added, the files in deleted as removed, |
|
852 | 852 | and the files in renames as copied.''' |
|
853 | 853 | wctx = repo[None] |
|
854 | 854 | with repo.wlock(): |
|
855 | 855 | wctx.forget(deleted) |
|
856 | 856 | wctx.add(unknown) |
|
857 | 857 | for new, old in renames.iteritems(): |
|
858 | 858 | wctx.copy(old, new) |
|
859 | 859 | |
|
860 | 860 | def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None): |
|
861 | 861 | """Update the dirstate to reflect the intent of copying src to dst. For |
|
862 | 862 | different reasons it might not end with dst being marked as copied from src. |
|
863 | 863 | """ |
|
864 | 864 | origsrc = repo.dirstate.copied(src) or src |
|
865 | 865 | if dst == origsrc: # copying back a copy? |
|
866 | 866 | if repo.dirstate[dst] not in 'mn' and not dryrun: |
|
867 | 867 | repo.dirstate.normallookup(dst) |
|
868 | 868 | else: |
|
869 | 869 | if repo.dirstate[origsrc] == 'a' and origsrc == src: |
|
870 | 870 | if not ui.quiet: |
|
871 | 871 | ui.warn(_("%s has not been committed yet, so no copy " |
|
872 | 872 | "data will be stored for %s.\n") |
|
873 | 873 | % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))) |
|
874 | 874 | if repo.dirstate[dst] in '?r' and not dryrun: |
|
875 | 875 | wctx.add([dst]) |
|
876 | 876 | elif not dryrun: |
|
877 | 877 | wctx.copy(origsrc, dst) |
|
878 | 878 | |
|
879 | 879 | def readrequires(opener, supported): |
|
880 | 880 | '''Reads and parses .hg/requires and checks if all entries found |
|
881 | 881 | are in the list of supported features.''' |
|
882 | 882 | requirements = set(opener.read("requires").splitlines()) |
|
883 | 883 | missings = [] |
|
884 | 884 | for r in requirements: |
|
885 | 885 | if r not in supported: |
|
886 | 886 | if not r or not r[0:1].isalnum(): |
|
887 | 887 | raise error.RequirementError(_(".hg/requires file is corrupt")) |
|
888 | 888 | missings.append(r) |
|
889 | 889 | missings.sort() |
|
890 | 890 | if missings: |
|
891 | 891 | raise error.RequirementError( |
|
892 | 892 | _("repository requires features unknown to this Mercurial: %s") |
|
893 | 893 | % " ".join(missings), |
|
894 | 894 | hint=_("see https://mercurial-scm.org/wiki/MissingRequirement" |
|
895 | 895 | " for more information")) |
|
896 | 896 | return requirements |
|
897 | 897 | |
|
898 | 898 | def writerequires(opener, requirements): |
|
899 | 899 | with opener('requires', 'w') as fp: |
|
900 | 900 | for r in sorted(requirements): |
|
901 | 901 | fp.write("%s\n" % r) |
|
902 | 902 | |
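A minimal round-trip sketch (repo.vfs and repo.supported mirror how localrepo invokes these helpers at open time; the added requirement is illustrative only):

    requirements = readrequires(repo.vfs, repo.supported)
    requirements.add('generaldelta')        # illustrative mutation
    writerequires(repo.vfs, requirements)   # rewritten sorted, one per line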
|
903 | 903 | class filecachesubentry(object): |
|
904 | 904 | def __init__(self, path, stat): |
|
905 | 905 | self.path = path |
|
906 | 906 | self.cachestat = None |
|
907 | 907 | self._cacheable = None |
|
908 | 908 | |
|
909 | 909 | if stat: |
|
910 | 910 | self.cachestat = filecachesubentry.stat(self.path) |
|
911 | 911 | |
|
912 | 912 | if self.cachestat: |
|
913 | 913 | self._cacheable = self.cachestat.cacheable() |
|
914 | 914 | else: |
|
915 | 915 | # None means we don't know yet |
|
916 | 916 | self._cacheable = None |
|
917 | 917 | |
|
918 | 918 | def refresh(self): |
|
919 | 919 | if self.cacheable(): |
|
920 | 920 | self.cachestat = filecachesubentry.stat(self.path) |
|
921 | 921 | |
|
922 | 922 | def cacheable(self): |
|
923 | 923 | if self._cacheable is not None: |
|
924 | 924 | return self._cacheable |
|
925 | 925 | |
|
926 | 926 | # we don't know yet, assume it is for now |
|
927 | 927 | return True |
|
928 | 928 | |
|
929 | 929 | def changed(self): |
|
930 | 930 | # no point in going further if we can't cache it |
|
931 | 931 | if not self.cacheable(): |
|
932 | 932 | return True |
|
933 | 933 | |
|
934 | 934 | newstat = filecachesubentry.stat(self.path) |
|
935 | 935 | |
|
936 | 936 | # we may not know if it's cacheable yet, check again now |
|
937 | 937 | if newstat and self._cacheable is None: |
|
938 | 938 | self._cacheable = newstat.cacheable() |
|
939 | 939 | |
|
940 | 940 | # check again |
|
941 | 941 | if not self._cacheable: |
|
942 | 942 | return True |
|
943 | 943 | |
|
944 | 944 | if self.cachestat != newstat: |
|
945 | 945 | self.cachestat = newstat |
|
946 | 946 | return True |
|
947 | 947 | else: |
|
948 | 948 | return False |
|
949 | 949 | |
|
950 | 950 | @staticmethod |
|
951 | 951 | def stat(path): |
|
952 | 952 | try: |
|
953 | 953 | return util.cachestat(path) |
|
954 | 954 | except OSError as e: |
|
955 | 955 | if e.errno != errno.ENOENT: |
|
956 | 956 | raise |
|
957 | 957 | |
|
958 | 958 | class filecacheentry(object): |
|
959 | 959 | def __init__(self, paths, stat=True): |
|
960 | 960 | self._entries = [] |
|
961 | 961 | for path in paths: |
|
962 | 962 | self._entries.append(filecachesubentry(path, stat)) |
|
963 | 963 | |
|
964 | 964 | def changed(self): |
|
965 | 965 | '''true if any entry has changed''' |
|
966 | 966 | for entry in self._entries: |
|
967 | 967 | if entry.changed(): |
|
968 | 968 | return True |
|
969 | 969 | return False |
|
970 | 970 | |
|
971 | 971 | def refresh(self): |
|
972 | 972 | for entry in self._entries: |
|
973 | 973 | entry.refresh() |
|
974 | 974 | |
|
975 | 975 | class filecache(object): |
|
976 | 976 | '''A property like decorator that tracks files under .hg/ for updates. |
|
977 | 977 | |
|
978 | 978 | Records stat info when called in _filecache. |
|
979 | 979 | |
|
980 | 980 | On subsequent calls, compares old stat info with new info, and recreates the |
|
981 | 981 | object when any of the files changes, updating the new stat info in |
|
982 | 982 | _filecache. |
|
983 | 983 | |
|
984 | 984 | Mercurial either atomic renames or appends for files under .hg, |
|
985 | 985 | so to ensure the cache is reliable we need the filesystem to be able |
|
986 | 986 | to tell us if a file has been replaced. If it can't, we fallback to |
|
987 | 987 | recreating the object on every call (essentially the same behavior as |
|
988 | 988 | propertycache). |
|
989 | 989 | |
|
990 | 990 | ''' |
|
991 | 991 | def __init__(self, *paths): |
|
992 | 992 | self.paths = paths |
|
993 | 993 | |
|
994 | 994 | def join(self, obj, fname): |
|
995 | 995 | """Used to compute the runtime path of a cached file. |
|
996 | 996 | |
|
997 | 997 | Users should subclass filecache and provide their own version of this |
|
998 | 998 | function to call the appropriate join function on 'obj' (an instance |
|
999 | 999 | of the class whose member function was decorated).
|
1000 | 1000 | """ |
|
1001 | 1001 | raise NotImplementedError |
|
1002 | 1002 | |
|
1003 | 1003 | def __call__(self, func): |
|
1004 | 1004 | self.func = func |
|
1005 | 1005 | self.name = func.__name__.encode('ascii') |
|
1006 | 1006 | return self |
|
1007 | 1007 | |
|
1008 | 1008 | def __get__(self, obj, type=None): |
|
1009 | 1009 | # if accessed on the class, return the descriptor itself. |
|
1010 | 1010 | if obj is None: |
|
1011 | 1011 | return self |
|
1012 | 1012 | # do we need to check if the file changed? |
|
1013 | 1013 | if self.name in obj.__dict__: |
|
1014 | 1014 | assert self.name in obj._filecache, self.name |
|
1015 | 1015 | return obj.__dict__[self.name] |
|
1016 | 1016 | |
|
1017 | 1017 | entry = obj._filecache.get(self.name) |
|
1018 | 1018 | |
|
1019 | 1019 | if entry: |
|
1020 | 1020 | if entry.changed(): |
|
1021 | 1021 | entry.obj = self.func(obj) |
|
1022 | 1022 | else: |
|
1023 | 1023 | paths = [self.join(obj, path) for path in self.paths] |
|
1024 | 1024 | |
|
1025 | 1025 | # We stat -before- creating the object so our cache doesn't lie if |
|
1026 | 1026 | # a writer modifies it between the time we read and stat
|
1027 | 1027 | entry = filecacheentry(paths, True) |
|
1028 | 1028 | entry.obj = self.func(obj) |
|
1029 | 1029 | |
|
1030 | 1030 | obj._filecache[self.name] = entry |
|
1031 | 1031 | |
|
1032 | 1032 | obj.__dict__[self.name] = entry.obj |
|
1033 | 1033 | return entry.obj |
|
1034 | 1034 | |
|
1035 | 1035 | def __set__(self, obj, value): |
|
1036 | 1036 | if self.name not in obj._filecache: |
|
1037 | 1037 | # we add an entry for the missing value because X in __dict__ |
|
1038 | 1038 | # implies X in _filecache |
|
1039 | 1039 | paths = [self.join(obj, path) for path in self.paths] |
|
1040 | 1040 | ce = filecacheentry(paths, False) |
|
1041 | 1041 | obj._filecache[self.name] = ce |
|
1042 | 1042 | else: |
|
1043 | 1043 | ce = obj._filecache[self.name] |
|
1044 | 1044 | |
|
1045 | 1045 | ce.obj = value # update cached copy |
|
1046 | 1046 | obj.__dict__[self.name] = value # update copy returned by obj.x |
|
1047 | 1047 | |
|
1048 | 1048 | def __delete__(self, obj): |
|
1049 | 1049 | try: |
|
1050 | 1050 | del obj.__dict__[self.name] |
|
1051 | 1051 | except KeyError: |
|
1052 | 1052 | raise AttributeError(self.name) |
|
1053 | 1053 | |
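A minimal sketch of how this descriptor is consumed, modeled on localrepo's usage; repofilecache, repolike, and parsebookmarks are illustrative names, and the object must carry a _filecache dict as the descriptor protocol above expects:

    class repofilecache(filecache):
        """filecache for files rooted at .hg/"""
        def join(self, obj, fname):
            return obj.vfs.join(fname)

    class repolike(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}   # required by filecache.__get__/__set__

        @repofilecache('bookmarks')
        def _bookmarks(self):
            # recomputed only when .hg/bookmarks changes on disk
            return parsebookmarks(self.vfs)   # hypothetical parser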
|
1054 | 1054 | def extdatasource(repo, source): |
|
1055 | 1055 | """Gather a map of rev -> value dict from the specified source |
|
1056 | 1056 | |
|
1057 | 1057 | A source spec is treated as a URL, with a special case shell: type |
|
1058 | 1058 | for parsing the output from a shell command. |
|
1059 | 1059 | |
|
1060 | 1060 | The data is parsed as a series of newline-separated records where |
|
1061 | 1061 | each record is a revision specifier optionally followed by a space |
|
1062 | 1062 | and a freeform string value. If the revision is known locally, it |
|
1063 | 1063 | is converted to a rev, otherwise the record is skipped. |
|
1064 | 1064 | |
|
1065 | 1065 | Note that both key and value are treated as UTF-8 and converted to |
|
1066 | 1066 | the local encoding. This allows uniformity between local and |
|
1067 | 1067 | remote data sources. |
|
1068 | 1068 | """ |
|
1069 | 1069 | |
|
1070 | 1070 | spec = repo.ui.config("extdata", source) |
|
1071 | 1071 | if not spec: |
|
1072 | 1072 | raise error.Abort(_("unknown extdata source '%s'") % source) |
|
1073 | 1073 | |
|
1074 | 1074 | data = {} |
|
1075 | 1075 | src = proc = None |
|
1076 | 1076 | try: |
|
1077 | 1077 | if spec.startswith("shell:"): |
|
1078 | 1078 | # external commands should be run relative to the repo root |
|
1079 | 1079 | cmd = spec[6:] |
|
1080 | 1080 | proc = subprocess.Popen(cmd, shell=True, bufsize=-1, |
|
1081 | 1081 | close_fds=util.closefds, |
|
1082 | 1082 | stdout=subprocess.PIPE, cwd=repo.root) |
|
1083 | 1083 | src = proc.stdout |
|
1084 | 1084 | else: |
|
1085 | 1085 | # treat as a URL or file |
|
1086 | 1086 | src = url.open(repo.ui, spec) |
|
1087 | 1087 | for l in src: |
|
1088 | 1088 | if " " in l: |
|
1089 | 1089 | k, v = l.strip().split(" ", 1) |
|
1090 | 1090 | else: |
|
1091 | 1091 | k, v = l.strip(), "" |
|
1092 | 1092 | |
|
1093 | 1093 | k = encoding.tolocal(k) |
|
1094 | 1094 | try: |
|
1095 | 1095 | data[repo[k].rev()] = encoding.tolocal(v) |
|
1096 | 1096 | except (error.LookupError, error.RepoLookupError): |
|
1097 | 1097 | pass # we ignore data for nodes that don't exist locally |
|
1098 | 1098 | finally: |
|
1099 | 1099 | if proc: |
|
1100 | 1100 | proc.communicate() |
|
1101 | 1101 | if src: |
|
1102 | 1102 | src.close() |
|
1103 | 1103 | if proc and proc.returncode != 0: |
|
1104 | 1104 | raise error.Abort(_("extdata command '%s' failed: %s") |
|
1105 | 1105 | % (cmd, util.explainexit(proc.returncode)[0])) |
|
1106 | 1106 | |
|
1107 | 1107 | return data |
|
1108 | 1108 | |
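A hedged end-to-end sketch: given an [extdata] section like the one in the comments below, the call returns a rev -> string map (the source name and file contents are made up):

    # hgrc:
    #   [extdata]
    #   bugzilla = shell:cat .hg/bugstatus
    # where .hg/bugstatus holds lines such as '<node or rev> fixed'
    data = extdatasource(repo, 'bugzilla')
    # e.g. data == {42: 'fixed'}; records naming revisions unknown
    # locally were silently skipped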
|
1109 | 1109 | def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs): |
|
1110 | 1110 | if lock is None: |
|
1111 | 1111 | raise error.LockInheritanceContractViolation( |
|
1112 | 1112 | 'lock can only be inherited while held') |
|
1113 | 1113 | if environ is None: |
|
1114 | 1114 | environ = {} |
|
1115 | 1115 | with lock.inherit() as locker: |
|
1116 | 1116 | environ[envvar] = locker |
|
1117 | 1117 | return repo.ui.system(cmd, environ=environ, *args, **kwargs) |
|
1118 | 1118 | |
|
1119 | 1119 | def wlocksub(repo, cmd, *args, **kwargs): |
|
1120 | 1120 | """run cmd as a subprocess that allows inheriting repo's wlock |
|
1121 | 1121 | |
|
1122 | 1122 | This can only be called while the wlock is held. This takes all the |
|
1123 | 1123 | arguments that ui.system does, and returns the exit code of the |
|
1124 | 1124 | subprocess.""" |
|
1125 | 1125 | return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args, |
|
1126 | 1126 | **kwargs) |
|
1127 | 1127 | |
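An illustrative call (the child command is arbitrary; the point is that HG_WLOCK_LOCKER is set in the child's environment so a cooperating child process can inherit the lock):

    with repo.wlock():
        rc = wlocksub(repo, 'hg debuglocks')   # returns the exit code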
|
1128 | 1128 | def gdinitconfig(ui): |
|
1129 | 1129 | """helper function to know if a repo should be created as general delta |
|
1130 | 1130 | """ |
|
1131 | 1131 | # experimental config: format.generaldelta |
|
1132 | 1132 | return (ui.configbool('format', 'generaldelta') |
|
1133 | 1133 | or ui.configbool('format', 'usegeneraldelta')) |
|
1134 | 1134 | |
|
1135 | 1135 | def gddeltaconfig(ui): |
|
1136 | 1136 | """helper function to know if incoming deltas should be optimised
|
1137 | 1137 | """ |
|
1138 | 1138 | # experimental config: format.generaldelta |
|
1139 | 1139 | return ui.configbool('format', 'generaldelta') |
|
1140 | 1140 | |
|
1141 | 1141 | class simplekeyvaluefile(object): |
|
1142 | 1142 | """A simple file with key=value lines |
|
1143 | 1143 | |
|
1144 | 1144 | Keys must be alphanumeric and start with a letter; values must not
|
1145 | 1145 | contain '\n' characters""" |
|
1146 | 1146 | firstlinekey = '__firstline' |
|
1147 | 1147 | |
|
1148 | 1148 | def __init__(self, vfs, path, keys=None): |
|
1149 | 1149 | self.vfs = vfs |
|
1150 | 1150 | self.path = path |
|
1151 | 1151 | |
|
1152 | 1152 | def read(self, firstlinenonkeyval=False): |
|
1153 | 1153 | """Read the contents of a simple key-value file |
|
1154 | 1154 | |
|
1155 | 1155 | 'firstlinenonkeyval' indicates whether the first line of file should |
|
1156 | 1156 | be treated as a key-value pair or returned fully under the
|
1157 | 1157 | __firstline key.""" |
|
1158 | 1158 | lines = self.vfs.readlines(self.path) |
|
1159 | 1159 | d = {} |
|
1160 | 1160 | if firstlinenonkeyval: |
|
1161 | 1161 | if not lines: |
|
1162 | 1162 | e = _("empty simplekeyvalue file") |
|
1163 | 1163 | raise error.CorruptedState(e) |
|
1164 | 1164 | # we don't want to include '\n' in the __firstline |
|
1165 | 1165 | d[self.firstlinekey] = lines[0][:-1] |
|
1166 | 1166 | del lines[0] |
|
1167 | 1167 | |
|
1168 | 1168 | try: |
|
1169 | 1169 | # the 'if line.strip()' part prevents us from failing on empty |
|
1170 | 1170 | # lines which only contain '\n' and therefore are not skipped
|
1171 | 1171 | # by 'if line' |
|
1172 | 1172 | updatedict = dict(line[:-1].split('=', 1) for line in lines |
|
1173 | 1173 | if line.strip()) |
|
1174 | 1174 | if self.firstlinekey in updatedict: |
|
1175 | 1175 | e = _("%r can't be used as a key") |
|
1176 | 1176 | raise error.CorruptedState(e % self.firstlinekey) |
|
1177 | 1177 | d.update(updatedict) |
|
1178 | 1178 | except ValueError as e: |
|
1179 | 1179 | raise error.CorruptedState(str(e)) |
|
1180 | 1180 | return d |
|
1181 | 1181 | |
|
1182 | 1182 | def write(self, data, firstline=None): |
|
1183 | 1183 | """Write key=>value mapping to a file |
|
1184 | 1184 | data is a dict. Keys must be alphanumeric and start with a letter.
|
1185 | 1185 | Values must not contain newline characters. |
|
1186 | 1186 | |
|
1187 | 1187 | If 'firstline' is not None, it is written to the file before

1188 | 1188 | everything else, as-is, not in key=value form"""
|
1189 | 1189 | lines = [] |
|
1190 | 1190 | if firstline is not None: |
|
1191 | 1191 | lines.append('%s\n' % firstline) |
|
1192 | 1192 | |
|
1193 | 1193 | for k, v in data.items(): |
|
1194 | 1194 | if k == self.firstlinekey: |
|
1195 | 1195 | e = "key name '%s' is reserved" % self.firstlinekey |
|
1196 | 1196 | raise error.ProgrammingError(e) |
|
1197 | 1197 | if not k[0:1].isalpha(): |
|
1198 | 1198 | e = "keys must start with a letter in a key-value file" |
|
1199 | 1199 | raise error.ProgrammingError(e) |
|
1200 | 1200 | if not k.isalnum(): |
|
1201 | 1201 | e = "invalid key name in a simple key-value file" |
|
1202 | 1202 | raise error.ProgrammingError(e) |
|
1203 | 1203 | if '\n' in v: |
|
1204 | 1204 | e = "invalid value in a simple key-value file" |
|
1205 | 1205 | raise error.ProgrammingError(e) |
|
1206 | 1206 | lines.append("%s=%s\n" % (k, v)) |
|
1207 | 1207 | with self.vfs(self.path, mode='wb', atomictemp=True) as fp: |
|
1208 | 1208 | fp.write(''.join(lines)) |
|
1209 | 1209 | |
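A round-trip sketch derived from the read/write contract above (the path and keys are illustrative):

    f = simplekeyvaluefile(repo.vfs, 'mystate')
    f.write({'version': '1', 'node': 'ffffffffffff'},
            firstline='statefile-v1')
    d = f.read(firstlinenonkeyval=True)
    # d == {'__firstline': 'statefile-v1',
    #       'version': '1', 'node': 'ffffffffffff'}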
|
1210 | 1210 | _reportobsoletedsource = [ |
|
1211 | 1211 | 'debugobsolete', |
|
1212 | 1212 | 'pull', |
|
1213 | 1213 | 'push', |
|
1214 | 1214 | 'serve', |
|
1215 | 1215 | 'unbundle', |
|
1216 | 1216 | ] |
|
1217 | 1217 | |
|
1218 | 1218 | _reportnewcssource = [ |
|
1219 | 1219 | 'pull', |
|
1220 | 1220 | 'unbundle', |
|
1221 | 1221 | ] |
|
1222 | 1222 | |
|
1223 | 1223 | # a list of (repo, ctx, files) functions called by various commands to allow |
|
1224 | 1224 | # extensions to ensure the corresponding files are available locally, before the |
|
1225 | 1225 | # command uses them. |
|
1226 | 1226 | fileprefetchhooks = util.hooks() |
|
1227 | 1227 | |
|
1228 | 1228 | # A marker that tells the evolve extension to suppress its own reporting |
|
1229 | 1229 | _reportstroubledchangesets = True |
|
1230 | 1230 | |
|
1231 | 1231 | def registersummarycallback(repo, otr, txnname=''): |
|
1232 | 1232 | """register a callback to issue a summary after the transaction is closed |
|
1233 | 1233 | """ |
|
1234 | 1234 | def txmatch(sources): |
|
1235 | 1235 | return any(txnname.startswith(source) for source in sources) |
|
1236 | 1236 | |
|
1237 | 1237 | categories = [] |
|
1238 | 1238 | |
|
1239 | 1239 | def reportsummary(func): |
|
1240 | 1240 | """decorator for report callbacks.""" |
|
1241 | 1241 | # The repoview life cycle is shorter than the one of the actual |
|
1242 | 1242 | # underlying repository. So the filtered object can die before the |
|
1243 | 1243 | # weakref is used leading to troubles. We keep a reference to the |
|
1244 | 1244 | # unfiltered object and restore the filtering when retrieving the |
|
1245 | 1245 | # repository through the weakref. |
|
1246 | 1246 | filtername = repo.filtername |
|
1247 | 1247 | reporef = weakref.ref(repo.unfiltered()) |
|
1248 | 1248 | def wrapped(tr): |
|
1249 | 1249 | repo = reporef() |
|
1250 | 1250 | if filtername: |
|
1251 | 1251 | repo = repo.filtered(filtername) |
|
1252 | 1252 | func(repo, tr) |
|
1253 | 1253 | newcat = '%02i-txnreport' % len(categories) |
|
1254 | 1254 | otr.addpostclose(newcat, wrapped) |
|
1255 | 1255 | categories.append(newcat) |
|
1256 | 1256 | return wrapped |
|
1257 | 1257 | |
|
1258 | 1258 | if txmatch(_reportobsoletedsource): |
|
1259 | 1259 | @reportsummary |
|
1260 | 1260 | def reportobsoleted(repo, tr): |
|
1261 | 1261 | obsoleted = obsutil.getobsoleted(repo, tr) |
|
1262 | 1262 | if obsoleted: |
|
1263 | 1263 | repo.ui.status(_('obsoleted %i changesets\n') |
|
1264 | 1264 | % len(obsoleted)) |
|
1265 | 1265 | |
|
1266 | 1266 | if (obsolete.isenabled(repo, obsolete.createmarkersopt) and |
|
1267 | 1267 | repo.ui.configbool('experimental', 'evolution.report-instabilities')): |
|
1268 | 1268 | instabilitytypes = [ |
|
1269 | 1269 | ('orphan', 'orphan'), |
|
1270 | 1270 | ('phase-divergent', 'phasedivergent'), |
|
1271 | 1271 | ('content-divergent', 'contentdivergent'), |
|
1272 | 1272 | ] |
|
1273 | 1273 | |
|
1274 | 1274 | def getinstabilitycounts(repo): |
|
1275 | 1275 | filtered = repo.changelog.filteredrevs |
|
1276 | 1276 | counts = {} |
|
1277 | 1277 | for instability, revset in instabilitytypes: |
|
1278 | 1278 | counts[instability] = len(set(obsolete.getrevs(repo, revset)) - |
|
1279 | 1279 | filtered) |
|
1280 | 1280 | return counts |
|
1281 | 1281 | |
|
1282 | 1282 | oldinstabilitycounts = getinstabilitycounts(repo) |
|
1283 | 1283 | @reportsummary |
|
1284 | 1284 | def reportnewinstabilities(repo, tr): |
|
1285 | 1285 | newinstabilitycounts = getinstabilitycounts(repo) |
|
1286 | 1286 | for instability, revset in instabilitytypes: |
|
1287 | 1287 | delta = (newinstabilitycounts[instability] - |
|
1288 | 1288 | oldinstabilitycounts[instability]) |
|
1289 | 1289 | if delta > 0: |
|
1290 | 1290 | repo.ui.warn(_('%i new %s changesets\n') % |
|
1291 | 1291 | (delta, instability)) |
|
1292 | 1292 | |
|
1293 | 1293 | if txmatch(_reportnewcssource): |
|
1294 | 1294 | @reportsummary |
|
1295 | 1295 | def reportnewcs(repo, tr): |
|
1296 | 1296 | """Report the range of new revisions pulled/unbundled.""" |
|
1297 | 1297 | newrevs = tr.changes.get('revs', xrange(0, 0)) |
|
1298 | 1298 | if not newrevs: |
|
1299 | 1299 | return |
|
1300 | 1300 | |
|
1301 | 1301 | # Compute the bounds of new revisions' range, excluding obsoletes. |
|
1302 | 1302 | unfi = repo.unfiltered() |
|
1303 | 1303 | revs = unfi.revs('%ld and not obsolete()', newrevs) |
|
1304 | 1304 | if not revs: |
|
1305 | 1305 | # Got only obsoletes. |
|
1306 | 1306 | return |
|
1307 | 1307 | minrev, maxrev = repo[revs.min()], repo[revs.max()] |
|
1308 | 1308 | |
|
1309 | 1309 | if minrev == maxrev: |
|
1310 | 1310 | revrange = minrev |
|
1311 | 1311 | else: |
|
1312 | 1312 | revrange = '%s:%s' % (minrev, maxrev) |
|
1313 | 1313 | repo.ui.status(_('new changesets %s\n') % revrange) |
|
1314 | 1314 | |
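A sketch of the wiring (upstream performs this inside localrepo.transaction; the explicit transaction handling here is for illustration only):

    tr = repo.transaction('pull')
    try:
        registersummarycallback(repo, tr, txnname='pull')
        # ... apply the incoming changegroup here ...
        tr.close()   # post-close callbacks print e.g. 'new changesets ...'
    finally:
        tr.release()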
|
1315 | 1315 | def nodesummaries(repo, nodes, maxnumnodes=4): |
|
1316 | 1316 | if len(nodes) <= maxnumnodes or repo.ui.verbose: |
|
1317 | 1317 | return ' '.join(short(h) for h in nodes) |
|
1318 | 1318 | first = ' '.join(short(h) for h in nodes[:maxnumnodes]) |
|
1319 | 1319 | return _("%s and %d others") % (first, len(nodes) - maxnumnodes) |
|
1320 | 1320 | |
|
1321 | 1321 | def enforcesinglehead(repo, tr, desc): |
|
1322 | 1322 | """check that no named branch has multiple heads""" |
|
1323 | 1323 | if desc in ('strip', 'repair'): |
|
1324 | 1324 | # skip the logic during strip |
|
1325 | 1325 | return |
|
1326 | 1326 | visible = repo.filtered('visible') |
|
1327 | 1327 | # possible improvement: we could restrict the check to affected branch |
|
1328 | 1328 | for name, heads in visible.branchmap().iteritems(): |
|
1329 | 1329 | if len(heads) > 1: |
|
1330 | 1330 | msg = _('rejecting multiple heads on branch "%s"') |
|
1331 | 1331 | msg %= name |
|
1332 | 1332 | hint = _('%d heads: %s') |
|
1333 | 1333 | hint %= (len(heads), nodesummaries(repo, heads)) |
|
1334 | 1334 | raise error.Abort(msg, hint=hint) |
|
1335 | 1335 | |
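A direct-use sketch (real deployments would trigger this from transaction hooks; here it simply runs before the transaction commits):

    with repo.transaction('push') as tr:
        # ... add incoming changesets ...
        enforcesinglehead(repo, tr, 'push')  # aborts on multi-headed branches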
|
1336 | 1336 | def wrapconvertsink(sink): |
|
1337 | 1337 | """Allow extensions to wrap the sink returned by convcmd.convertsink() |
|
1338 | 1338 | before it is used, whether or not the convert extension was formally loaded. |
|
1339 | 1339 | """ |
|
1340 | 1340 | return sink |
|
1341 | 1341 | |
|
1342 | 1342 | def unhidehashlikerevs(repo, specs, hiddentype): |
|
1343 | 1343 | """parse the user specs and unhide changesets whose hash or revision number |
|
1344 | 1344 | is passed. |
|
1345 | 1345 | |
|
1346 | 1346 | hiddentype can be: 1) 'warn': warn while unhiding changesets |
|
1347 | 1347 | 2) 'nowarn': don't warn while unhiding changesets |
|
1348 | 1348 | |
|
1349 | 1349 | returns a repo object with the required changesets unhidden |
|
1350 | 1350 | """ |
|
1351 | 1351 | if not repo.filtername or not repo.ui.configbool('experimental', |
|
1352 | 1352 | 'directaccess'): |
|
1353 | 1353 | return repo |
|
1354 | 1354 | |
|
1355 | 1355 | if repo.filtername not in ('visible', 'visible-hidden'): |
|
1356 | 1356 | return repo |
|
1357 | 1357 | |
|
1358 | 1358 | symbols = set() |
|
1359 | 1359 | for spec in specs: |
|
1360 | 1360 | try: |
|
1361 | 1361 | tree = revsetlang.parse(spec) |
|
1362 | 1362 | except error.ParseError: # will be reported by scmutil.revrange() |
|
1363 | 1363 | continue |
|
1364 | 1364 | |
|
1365 | 1365 | symbols.update(revsetlang.gethashlikesymbols(tree)) |
|
1366 | 1366 | |
|
1367 | 1367 | if not symbols: |
|
1368 | 1368 | return repo |
|
1369 | 1369 | |
|
1370 | 1370 | revs = _getrevsfromsymbols(repo, symbols) |
|
1371 | 1371 | |
|
1372 | 1372 | if not revs: |
|
1373 | 1373 | return repo |
|
1374 | 1374 | |
|
1375 | 1375 | if hiddentype == 'warn': |
|
1376 | 1376 | unfi = repo.unfiltered() |
|
1377 | 1377 | revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs]) |
|
1378 | 1378 | repo.ui.warn(_("warning: accessing hidden changesets for write " |
|
1379 | 1379 | "operation: %s\n") % revstr) |
|
1380 | 1380 | |
|
1381 | 1381 | # we have to use a new filtername to separate branch/tags caches until we can

1382 | 1382 | # disable these caches when revisions are dynamically pinned.
|
1383 | 1383 | return repo.filtered('visible-hidden', revs) |
|
1384 | 1384 | |
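A caller-side sketch (command implementations do something similar before resolving user-supplied revisions; the hash is a placeholder):

    userrevs = ['ffffffffffff']       # specs typed by the user
    repo = unhidehashlikerevs(repo, userrevs, 'warn')
    revs = revrange(repo, userrevs)   # scmutil.revrange; hidden hashes now resolve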
|
1385 | 1385 | def _getrevsfromsymbols(repo, symbols): |
|
1386 | 1386 | """parse the list of symbols and return a set of revision numbers of hidden
|
1387 | 1387 | changesets present in symbols""" |
|
1388 | 1388 | revs = set() |
|
1389 | 1389 | unfi = repo.unfiltered() |
|
1390 | 1390 | unficl = unfi.changelog |
|
1391 | 1391 | cl = repo.changelog |
|
1392 | 1392 | tiprev = len(unficl) |
|
1393 | 1393 | pmatch = unficl._partialmatch |
|
1394 | 1394 | allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums') |
|
1395 | 1395 | for s in symbols: |
|
1396 | 1396 | try: |
|
1397 | 1397 | n = int(s) |
|
1398 | 1398 | if n <= tiprev: |
|
1399 | 1399 | if not allowrevnums: |
|
1400 | 1400 | continue |
|
1401 | 1401 | else: |
|
1402 | 1402 | if n not in cl: |
|
1403 | 1403 | revs.add(n) |
|
1404 | 1404 | continue |
|
1405 | 1405 | except ValueError: |
|
1406 | 1406 | pass |
|
1407 | 1407 | |
|
1408 | 1408 | try: |
|
1409 | 1409 | s = pmatch(s) |
|
1410 | 1410 | except error.LookupError: |
|
1411 | 1411 | s = None |
|
1412 | 1412 | |
|
1413 | 1413 | if s is not None: |
|
1414 | 1414 | rev = unficl.rev(s) |
|
1415 | 1415 | if rev not in cl: |
|
1416 | 1416 | revs.add(rev) |
|
1417 | 1417 | |
|
1418 | 1418 | return revs |
|