##// END OF EJS Templates
bundle-ng: add bundlecaps argument to getbundle() command
Benoit Boissinot
r19201:309c439c default
parent child Browse files
Show More
@@ -1,299 +1,302 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import nullrev
10 10 import mdiff, util, dagutil
11 11 import struct, os, bz2, zlib, tempfile
12 12
13 13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
14 14
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) < n:
        # a short read means the peer hung up or the bundle is truncated
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(data), n))
    return data
23 23
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # the first 4 bytes are the total chunk length, header included
    lengthword = readexactly(stream, 4)
    length = struct.unpack(">l", lengthword)[0]
    if length <= 4:
        if length:
            # negative or impossibly small: corrupt stream
            raise util.Abort(_("invalid chunk length %d") % length)
        # a zero-length chunk terminates a chunk group
        return ""
    return readexactly(stream, length - 4)
33 33
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the on-wire length counts the 4-byte header itself
    return struct.pack(">l", 4 + length)
37 37
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero length word marks the end of a chunk group
    return struct.pack(">l", 0)
41 41
class nocompress(object):
    """Pass-through 'compressor' used when the bundle is stored raw."""
    def compress(self, x):
        # identity transform: hand the data back unchanged
        return x
    def flush(self):
        # nothing is ever buffered, so there is no tail to emit
        return ""
47 47
# Map of bundle type name -> (file header to write, compressor factory).
bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
59 59
def writebundle(cg, filename, bundletype):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        # arm the error-path cleanup; disarmed again just before returning
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(cg)
                if not chunk:
                    # end of this chunk group
                    break
                empty = False
                fh.write(z.compress(chunkheader(len(chunk))))
                pos = 0
                # feed the compressor in 1 MB slices to bound memory use
                while pos < len(chunk):
                    next = pos + 2**20
                    fh.write(z.compress(chunk[pos:next]))
                    pos = next
            fh.write(z.compress(closechunk()))
        fh.write(z.flush())
        # success: disarm the cleanup so the finished bundle survives
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            os.unlink(cleanup)
114 114
def decompressor(fh, alg):
    """Wrap stream fh in a reader that decompresses algorithm alg."""
    if alg == 'UN':
        # stored uncompressed: the raw stream is already usable
        return fh
    if alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # re-inject the "BZ" magic that was consumed with the header
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
132 132
class unbundle10(object):
    """Reader for a version 10 changegroup stream."""
    deltaheader = _BUNDLE10_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)

    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        # optional progress hook, invoked once per non-empty chunk
        self.callback = None

    def compressed(self):
        return self._type != 'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def chunklength(self):
        """Read the next length word and return the payload size."""
        lengthword = readexactly(self._stream, 4)
        length = struct.unpack(">l", lengthword)[0]
        if length <= 4:
            if length:
                raise util.Abort(_("invalid chunk length %d") % length)
            # zero-length chunk: group terminator
            return 0
        if self.callback:
            self.callback()
        return length - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        length = self.chunklength()
        if not length:
            return {}
        fname = readexactly(self._stream, length)
        return dict(filename=fname)

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            # first chunk: the delta base is implicitly the first parent
            deltabase = p1
        else:
            # later chunks delta against the previously sent node
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        length = self.chunklength()
        if not length:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, length - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return dict(node=node, p1=p1, p2=p2, cs=cs,
                    deltabase=deltabase, delta=delta)
196 196
class headerlessfixup(object):
    """File-like wrapper that replays an already-consumed prefix h
    in front of the remaining data of fh."""
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if not self._h:
            # buffered prefix exhausted: read straight from the stream
            return readexactly(self._fh, n)
        d, self._h = self._h[:n], self._h[n:]
        if len(d) < n:
            # prefix ran out mid-request: top up from the stream
            d += readexactly(self._fh, n - len(d))
        return d
208 208
def readbundle(fh, fname):
    """Sniff the 6-byte bundle header of fh and return an unbundler."""
    header = readexactly(fh, 6)

    if not fname:
        fname = "stream"
    # a headerless stream begins with a NUL length byte; re-attach the
    # bytes we consumed and treat it as an uncompressed HG10 bundle
    if not header.startswith('HG') and header.startswith('\0'):
        fh = headerlessfixup(fh, header)
        header = "HG10UN"

    magic, version, alg = header[0:2], header[2:4], header[4:6]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version != '10':
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
    return unbundle10(fh, alg)
225 225
class bundle10(object):
    """Generator of version 10 changegroup chunk streams."""
    deltaheader = _BUNDLE10_DELTA_HEADER

    def __init__(self, bundlecaps=None):
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps

    def start(self, lookup):
        # lookup(revlog, node) -> linknode for that revision
        self._lookup = lookup

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.
        """

        # if we don't have any revisions touched by these changesets, bail
        if not nodelist:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            for c in self.revchunk(revlog, curr, prev):
                yield c

        yield self.close()

    def revchunk(self, revlog, rev, prev):
        """Yield the header and payload chunks encoding rev as a delta
        against prev."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            # no usable base: ship the full text framed as a trivial diff
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        linknode = self._lookup(revlog, node)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
@@ -1,5875 +1,5880 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, bin, nullid, nullrev, short
9 9 from lock import release
10 10 from i18n import _
11 11 import os, re, difflib, time, tempfile, errno
12 12 import hg, scmutil, util, revlog, copies, error, bookmarks
13 13 import patch, help, encoding, templatekw, discovery
14 14 import archival, changegroup, cmdutil, hbisect
15 15 import sshserver, hgweb, hgweb.server, commandserver
16 16 import merge as mergemod
17 17 import minirst, revset, fileset
18 18 import dagparser, context, simplemerge, graphmod
19 19 import random, setdiscovery, treediscovery, dagutil, pvec, localrepo
20 20 import phases, obsolete
21 21
# command table: filled in by the @command decorator applications below
table = {}

command = cmdutil.command(table)

# common command options

# options accepted by every command
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or name of overlay bundle file'),
     _('REPO')),
    ('', 'cwd', '',
     _('change working directory'), _('DIR')),
    ('y', 'noninteractive', None,
     _('do not prompt, automatically pick the first choice for all prompts')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [],
     _('set/override config option (use \'section.name=value\')'),
     _('CONFIG')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', encoding.encoding, _('set the charset encoding'),
     _('ENCODE')),
    ('', 'encodingmode', encoding.encodingmode,
     _('set the charset encoding mode'), _('MODE')),
    ('', 'traceback', None, _('always print a traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
    ('', 'hidden', False, _('consider hidden changesets')),
]

dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

# options for commands that talk to a remote repository
remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

# include/exclude patterns for commands that walk the working directory
walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file'), _('STYLE')),
    ('', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

# whitespace-handling options shared by diff-like commands
diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
]

diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]
142 142
143 143 # Commands start here, listed alphabetically
144 144
@command('^add',
    walkopts + subrepoopts + dryrunopts,
    _('[OPTION]... [FILE]...'))
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the
    repository.

    The files will be added to the repository at the next commit. To
    undo an add before that, see :hg:`forget`.

    If no names are given, add all files to the repository.

    .. container:: verbose

       An example showing how new (unknown) files are added
       automatically by :hg:`add`::

         $ ls
         foo.c
         $ hg status
         ? foo.c
         $ hg add
         adding foo.c
         $ hg status
         A foo.c

    Returns 0 if all files are successfully added.
    """

    matcher = scmutil.match(repo[None], pats, opts)
    rejected = cmdutil.add(ui, repo, matcher, opts.get('dry_run'),
                           opts.get('subrepos'), prefix="", explicitonly=False)
    # non-empty reject list means at least one file could not be added
    if rejected:
        return 1
    return 0
180 180
@command('addremove',
    similarityopts + walkopts + dryrunopts,
    _('[OPTION]... [FILE]...'))
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the
    repository.

    New files are ignored if they match any of the patterns in
    ``.hgignore``. As with add, these changes take effect at the next
    commit.

    Use the -s/--similarity option to detect renamed files. This
    option takes a percentage between 0 (disabled) and 100 (files must
    be identical) as its parameter. With a parameter greater than 0,
    this compares every removed file with every added file and records
    those similar enough as renames. Detecting renamed files this way
    can be expensive. After using this option, :hg:`status -C` can be
    used to check which files were identified as moved or renamed. If
    not specified, -s/--similarity defaults to 100 and only renames of
    identical files are detected.

    Returns 0 if all files are successfully added.
    """
    # unset or empty --similarity means "exact matches only" (100)
    try:
        sim = float(opts.get('similarity') or 100)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0)
213 213
@command('^annotate|blame',
    [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
    ('', 'follow', None,
     _('follow copies/renames and list the filename (DEPRECATED)')),
    ('', 'no-follow', None, _("don't follow copies and renames")),
    ('a', 'text', None, _('treat all files as text')),
    ('u', 'user', None, _('list the author (long with -v)')),
    ('f', 'file', None, _('list the filename')),
    ('d', 'date', None, _('list the date (short with -q)')),
    ('n', 'number', None, _('list the revision number (default)')),
    ('c', 'changeset', None, _('list the changeset')),
    ('l', 'line-number', None, _('show line number at the first appearance'))
    ] + diffwsopts + walkopts,
    _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'))
def annotate(ui, repo, *pats, **opts):
    """show changeset information by line for each file

    List changes in files, showing the revision id responsible for
    each line

    This command is useful for discovering when a change was made and
    by whom.

    Without the -a/--text option, annotate will avoid processing files
    it detects as binary. With -a, annotate will annotate the file
    anyway, although the results will probably be neither useful
    nor desirable.

    Returns 0 on success.
    """
    if opts.get('follow'):
        # --follow is deprecated and now just an alias for -f/--file
        # to mimic the behavior of Mercurial before version 1.5
        opts['file'] = True

    datefunc = ui.quiet and util.shortdate or util.datestr
    getdate = util.cachefunc(lambda x: datefunc(x[0].date()))

    if not pats:
        raise util.Abort(_('at least one filename or pattern is required'))

    hexfn = ui.debugflag and hex or short

    # one (option name, column separator, formatter) entry per column
    opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
             ('number', ' ', lambda x: str(x[0].rev())),
             ('changeset', ' ', lambda x: hexfn(x[0].node())),
             ('date', ' ', getdate),
             ('file', ' ', lambda x: x[0].path()),
             ('line_number', ':', lambda x: str(x[1])),
            ]

    if (not opts.get('user') and not opts.get('changeset')
        and not opts.get('date') and not opts.get('file')):
        # no column was requested: default to the revision number
        opts['number'] = True

    linenumber = opts.get('line_number') is not None
    if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
        raise util.Abort(_('at least one of -n/-c is required for -l'))

    # keep only the formatters for the columns actually requested
    funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
    funcmap[0] = (funcmap[0][0], '') # no separator in front of first column

    def bad(x, y):
        raise util.Abort("%s: %s" % (x, y))

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, pats, opts)
    m.bad = bad
    follow = not opts.get('no_follow')
    diffopts = patch.diffopts(ui, opts, section='annotate')
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        if not opts.get('text') and util.binary(fctx.data()):
            ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
            continue

        lines = fctx.annotate(follow=follow, linenumber=linenumber,
                              diffopts=diffopts)
        pieces = []

        # render each column right-aligned to its widest cell
        for f, sep in funcmap:
            l = [f(n) for n, dummy in lines]
            if l:
                sized = [(x, encoding.colwidth(x)) for x in l]
                ml = max([w for x, w in sized])
                pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x)
                               for x, w in sized])

        if pieces:
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % ("".join(p), l[1]))

        # make sure the final annotated line is newline-terminated
        if lines and not lines[-1][1].endswith('\n'):
            ui.write('\n')
308 308
@command('archive',
    [('', 'no-decode', None, _('do not pass files through decoders')),
    ('p', 'prefix', '', _('directory prefix for files in archive'),
     _('PREFIX')),
    ('r', 'rev', '', _('revision to distribute'), _('REV')),
    ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
    ] + subrepoopts + walkopts,
    _('[OPTION]... DEST'))
def archive(ui, repo, dest, **opts):
    '''create an unversioned archive of a repository revision

    By default, the revision used is the parent of the working
    directory; use -r/--rev to specify a different revision.

    The archive type is automatically detected based on file
    extension (or override using -t/--type).

    .. container:: verbose

      Examples:

      - create a zip file containing the 1.0 release::

          hg archive -r 1.0 project-1.0.zip

      - create a tarball excluding .hg files::

          hg archive project.tar.gz -X ".hg*"

    Valid types are:

    :``files``: a directory full of files (default)
    :``tar``:   tar archive, uncompressed
    :``tbz2``:  tar archive, compressed using bzip2
    :``tgz``:   tar archive, compressed using gzip
    :``uzip``:  zip archive, uncompressed
    :``zip``:   zip archive, compressed using deflate

    The exact name of the destination archive or directory is given
    using a format string; see :hg:`help export` for details.

    Each member added to an archive file has a directory prefix
    prepended. Use -p/--prefix to specify a format string for the
    prefix. The default is the basename of the archive, with suffixes
    removed.

    Returns 0 on success.
    '''

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    if not ctx:
        raise util.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    dest = cmdutil.makefilename(repo, dest, node)
    if os.path.realpath(dest) == repo.root:
        raise util.Abort(_('repository root cannot be destination'))

    kind = opts.get('type') or archival.guesskind(dest) or 'files'
    prefix = opts.get('prefix')

    if dest == '-':
        # '-' means write the archive to stdout
        if kind == 'files':
            raise util.Abort(_('cannot archive plain files to stdout'))
        dest = cmdutil.makefileobj(repo, dest)
        # stdout archives cannot derive a prefix from the destination name
        prefix = prefix or os.path.basename(repo.root) + '-%h'

    prefix = cmdutil.makefilename(repo, prefix, node)
    matcher = scmutil.match(ctx, [], opts)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     matcher, prefix, subrepos=opts.get('subrepos'))
380 380
@command('backout',
    [('', 'merge', None, _('merge with old dirstate parent after backout')),
    ('', 'parent', '',
     _('parent to choose when backing out merge (DEPRECATED)'), _('REV')),
    ('r', 'rev', '', _('revision to backout'), _('REV')),
    ] + mergetoolopts + walkopts + commitopts + commitopts2,
    _('[OPTION]... [-r] REV'))
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset

    Prepare a new changeset with the effect of REV undone in the
    current working directory.

    If REV is the parent of the working directory, then this new changeset
    is committed automatically. Otherwise, hg needs to merge the
    changes and the merged result is left uncommitted.

    .. note::
      backout cannot be used to fix either an unwanted or
      incorrect merge.

    .. container:: verbose

      By default, the pending changeset will have one parent,
      maintaining a linear history. With --merge, the pending
      changeset will instead have two parents: the old parent of the
      working directory and a new child of REV that simply undoes REV.

      Before version 1.7, the behavior without --merge was equivalent
      to specifying --merge followed by :hg:`update --clean .` to
      cancel the merge and leave the child of REV as a head to be
      merged separately.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    '''
    # REV may arrive positionally (node) or via -r/--rev; accept either
    # but not both
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if not rev:
        raise util.Abort(_("please specify a revision to backout"))

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    cmdutil.bailifchanged(repo)
    node = scmutil.revsingle(repo, rev).node()

    op1, op2 = repo.dirstate.parents()
    # only ancestors of the working directory parent can be backed out
    a = repo.changelog.ancestor(op1, node)
    if a != node:
        raise util.Abort(_('cannot backout change on a different branch'))

    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise util.Abort(_('cannot backout a change with no parents'))
    if p2 != nullid:
        # backing out a merge: --parent selects which side to undo against
        if not opts.get('parent'):
            raise util.Abort(_('cannot backout a merge changeset'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise util.Abort(_('%s is not a parent of %s') %
                             (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise util.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1

    # the backout should appear on the same branch
    wlock = repo.wlock()
    try:
        branch = repo.dirstate.branch()
        bheads = repo.branchheads(branch)
        # check out REV, then revert it to its chosen parent, producing a
        # working directory that undoes REV
        hg.clean(repo, node, show_stats=False)
        repo.dirstate.setbranch(branch)
        rctx = scmutil.revsingle(repo, hex(parent))
        cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())
        if not opts.get('merge') and op1 != node:
            # linear-history mode: go back to the old working directory
            # parent, leaving the merge for the caller
            try:
                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                return hg.update(repo, op1)
            finally:
                ui.setconfig('ui', 'forcemerge', '')

        e = cmdutil.commiteditor
        if not opts['message'] and not opts['logfile']:
            # we don't translate commit messages
            opts['message'] = "Backed out changeset %s" % short(node)
            e = cmdutil.commitforceeditor

        def commitfunc(ui, repo, message, match, opts):
            return repo.commit(message, opts.get('user'), opts.get('date'),
                               match, editor=e)
        newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
        cmdutil.commitstatus(repo, newnode, branch, bheads)

        def nice(node):
            return '%d:%s' % (repo.changelog.rev(node), short(node))
        ui.status(_('changeset %s backs out changeset %s\n') %
                  (nice(repo.changelog.tip()), nice(node)))
        if opts.get('merge') and op1 != node:
            # --merge: merge the backout changeset with the old parent
            hg.clean(repo, op1, show_stats=False)
            ui.status(_('merging with changeset %s\n')
                      % nice(repo.changelog.tip()))
            try:
                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                return hg.merge(repo, hex(repo.changelog.tip()))
            finally:
                ui.setconfig('ui', 'forcemerge', '')
    finally:
        wlock.release()
    return 0
499 499
500 500 @command('bisect',
501 501 [('r', 'reset', False, _('reset bisect state')),
502 502 ('g', 'good', False, _('mark changeset good')),
503 503 ('b', 'bad', False, _('mark changeset bad')),
504 504 ('s', 'skip', False, _('skip testing changeset')),
505 505 ('e', 'extend', False, _('extend the bisect range')),
506 506 ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
507 507 ('U', 'noupdate', False, _('do not update to target'))],
508 508 _("[-gbsr] [-U] [-c CMD] [REV]"))
def bisect(ui, repo, rev=None, extra=None, command=None,
           reset=None, good=None, bad=None, skip=None, extend=None,
           noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command, it will be used for automatic bisection.
    The environment variable HG_NODE will contain the ID of the
    changeset being tested. The exit status of the command will be
    used to mark revisions as good or bad: status 0 means good, 125
    means to skip the revision, 127 (command not found) will abort the
    bisection, and any other non-zero exit status means the revision
    is bad.

    .. container:: verbose

      Some examples:

      - start a bisection with known bad revision 12, and good revision 34::

          hg bisect --bad 34
          hg bisect --good 12

      - advance the current bisection by marking current revision as good or
        bad::

          hg bisect --good
          hg bisect --bad

      - mark the current revision, or a known revision, to be skipped (e.g. if
        that revision is not usable because of another issue)::

          hg bisect --skip
          hg bisect --skip 23

      - skip all revisions that do not touch directories ``foo`` or ``bar``::

          hg bisect --skip '!( file("path:foo") & file("path:bar") )'

      - forget the current bisection::

          hg bisect --reset

      - use 'make && make tests' to automatically find the first broken
        revision::

          hg bisect --reset
          hg bisect --bad 34
          hg bisect --good 12
          hg bisect --command 'make && make tests'

      - see all changesets whose states are already known in the current
        bisection::

          hg log -r "bisect(pruned)"

      - see the changeset currently being bisected (especially useful
        if running with -U/--noupdate)::

          hg log -r "bisect(current)"

      - see all changesets that took part in the current bisection::

          hg log -r "bisect(range)"

      - with the graphlog extension, you can even get a nice graph::

          hg log --graph -r "bisect(range)"

      See :hg:`help revsets` for more about the `bisect()` keyword.

    Returns 0 on success.
    """
    def extendbisectrange(nodes, good):
        # bisect is incomplete when it ends on a merge node and
        # one of the parent was not checked.
        parents = repo[nodes[0]].parents()
        if len(parents) > 1:
            side = good and state['bad'] or state['good']
            num = len(set(i.node() for i in parents) & set(side))
            if num == 1:
                return parents[0].ancestor(parents[1])
        return None

    def print_result(nodes, good):
        # report the outcome of the bisection: either a single culprit
        # revision, or the candidate set when skips left ambiguity
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                ui.write(_('Not all ancestors of this changeset have been'
                           ' checked.\nUse bisect --extend to continue the '
                           'bisection from\nthe common ancestor, %s.\n')
                         % extendnode)
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                           "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                           "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()

    def check_state(state, interactive=True):
        # a bisection needs at least one good and one bad revision;
        # interactively we tolerate a partial state while the user is
        # still marking revisions
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True

    # backward compatibility: accept the old 'hg bisect good|bad|reset|init'
    # subcommand spelling
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + extend + bool(command) > 1:
        raise util.Abort(_('incompatible arguments'))

    if reset:
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return

    state = hbisect.load_state(repo)

    if command:
        # automatic mode: run the command repeatedly, mapping its exit
        # status to good/bad/skip, until the bisection converges
        changesets = 1
        try:
            node = state['current'][0]
        except LookupError:
            if noupdate:
                raise util.Abort(_('current bisect revision is unknown - '
                                   'start a new bisect to fix'))
            node, p2 = repo.dirstate.parents()
            if p2 != nullid:
                raise util.Abort(_('current bisect revision is a merge'))
        try:
            while changesets:
                # update state
                state['current'] = [node]
                hbisect.save_state(repo, state)
                status = util.system(command,
                                     environ={'HG_NODE': hex(node)},
                                     out=ui.fout)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = scmutil.revsingle(repo, rev, node)
                rev = None # clear for future iterations
                state[transition].append(ctx.node())
                ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                # update to next check
                node = nodes[0]
                if not noupdate:
                    cmdutil.bailifchanged(repo)
                    hg.clean(repo, node, show_stats=False)
        finally:
            # always persist where we stopped so a later run can resume
            state['current'] = [node]
            hbisect.save_state(repo, state)
        print_result(nodes, good)
        return

    # update state

    if rev:
        nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])]
    else:
        nodes = [repo.lookup('.')]

    if good or bad or skip:
        if good:
            state['good'] += nodes
        elif bad:
            state['bad'] += nodes
        elif skip:
            state['skip'] += nodes
        hbisect.save_state(repo, state)

    if not check_state(state):
        return

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if extend:
        if not changesets:
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                # translate the message first, then apply % formatting:
                # formatting inside _() would look up the formatted string
                # as the msgid and defeat translation
                ui.write(_("Extending search to changeset %d:%s\n")
                         % (extendnode.rev(), extendnode))
                state['current'] = [extendnode.node()]
                hbisect.save_state(repo, state)
                if noupdate:
                    return
                cmdutil.bailifchanged(repo)
                return hg.clean(repo, extendnode.node())
        raise util.Abort(_("nothing to extend"))

    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        state['current'] = [node]
        hbisect.save_state(repo, state)
        if not noupdate:
            cmdutil.bailifchanged(repo)
            return hg.clean(repo, node)
@command('bookmarks|bookmark',
    [('f', 'force', False, _('force')),
    ('r', 'rev', '', _('revision'), _('REV')),
    ('d', 'delete', False, _('delete a given bookmark')),
    ('m', 'rename', '', _('rename a given bookmark'), _('NAME')),
    ('i', 'inactive', False, _('mark a bookmark inactive'))],
    _('hg bookmarks [OPTIONS]... [NAME]...'))
def bookmark(ui, repo, *names, **opts):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when committing.
    Bookmarks are local. They can be renamed, copied and deleted. It is
    possible to use :hg:`merge NAME` to merge from a given bookmark, and
    :hg:`update NAME` to update to a given bookmark.

    You can use :hg:`bookmark NAME` to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.

    Bookmarks can be pushed and pulled between repositories (see :hg:`help
    push` and :hg:`help pull`). This requires both the local and remote
    repositories to support bookmarks. For versions prior to 1.8, this means
    the bookmarks extension must be enabled.

    If you set a bookmark called '@', new clones of the repository will
    have that revision checked out (and the bookmark made active) by
    default.

    With -i/--inactive, the new bookmark will not be made the active
    bookmark. If -r/--rev is given, the new bookmark will not be made
    active even if -i/--inactive is not given. If no NAME is given, the
    current active bookmark will be marked inactive.
    '''
    force = opts.get('force')
    rev = opts.get('rev')
    delete = opts.get('delete')
    rename = opts.get('rename')
    inactive = opts.get('inactive')

    hexfn = ui.debugflag and hex or short
    marks = repo._bookmarks
    # node of the working directory's parent; default target for new marks
    cur = repo.changectx('.').node()

    def checkformat(mark):
        # validate a user-supplied bookmark name and return it stripped
        mark = mark.strip()
        if not mark:
            raise util.Abort(_("bookmark names cannot consist entirely of "
                               "whitespace"))
        scmutil.checknewlabel(repo, mark, 'bookmark')
        return mark

    def checkconflict(repo, mark, force=False, target=None):
        # abort if 'mark' collides with an existing bookmark or branch
        # name, except for the cases (fast-forward move, divergent
        # bookmark resolution) that are resolved automatically below
        if mark in marks and not force:
            if target:
                if marks[mark] == target and target == cur:
                    # re-activating a bookmark
                    return
                anc = repo.changelog.ancestors([repo[target].rev()])
                bmctx = repo[marks[mark]]
                # divergent variants share the part of the name before '@'
                divs = [repo[b].node() for b in marks
                        if b.split('@', 1)[0] == mark.split('@', 1)[0]]

                # allow resolving a single divergent bookmark even if moving
                # the bookmark across branches when a revision is specified
                # that contains a divergent bookmark
                if bmctx.rev() not in anc and target in divs:
                    bookmarks.deletedivergent(repo, [target], mark)
                    return

                deletefrom = [b for b in divs
                              if repo[b].rev() in anc or b == target]
                bookmarks.deletedivergent(repo, deletefrom, mark)
                if bmctx.rev() in anc:
                    # target is a descendant: moving forward is always safe
                    ui.status(_("moving bookmark '%s' forward from %s\n") %
                              (mark, short(bmctx.node())))
                    return
            raise util.Abort(_("bookmark '%s' already exists "
                               "(use -f to force)") % mark)
        if ((mark in repo.branchmap() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))

    # reject mutually exclusive option combinations up front
    if delete and rename:
        raise util.Abort(_("--delete and --rename are incompatible"))
    if delete and rev:
        raise util.Abort(_("--rev is incompatible with --delete"))
    if rename and rev:
        raise util.Abort(_("--rev is incompatible with --rename"))
    if not names and (delete or rev):
        raise util.Abort(_("bookmark name required"))

    if delete:
        for mark in names:
            if mark not in marks:
                raise util.Abort(_("bookmark '%s' does not exist") % mark)
            if mark == repo._bookmarkcurrent:
                bookmarks.setcurrent(repo, None)
            del marks[mark]
        marks.write()

    elif rename:
        # 'rename' holds the old name; NAME on the command line is the new one
        if not names:
            raise util.Abort(_("new bookmark name required"))
        elif len(names) > 1:
            raise util.Abort(_("only one new bookmark name allowed"))
        mark = checkformat(names[0])
        if rename not in marks:
            raise util.Abort(_("bookmark '%s' does not exist") % rename)
        checkconflict(repo, mark, force)
        marks[mark] = marks[rename]
        if repo._bookmarkcurrent == rename and not inactive:
            bookmarks.setcurrent(repo, mark)
        del marks[rename]
        marks.write()

    elif names:
        # set one or more bookmarks; 'newact' remembers the first name,
        # which becomes the active bookmark when appropriate
        newact = None
        for mark in names:
            mark = checkformat(mark)
            if newact is None:
                newact = mark
            if inactive and mark == repo._bookmarkcurrent:
                # -i on the currently active bookmark just deactivates it;
                # NOTE: this returns without processing any remaining names
                bookmarks.setcurrent(repo, None)
                return
            tgt = cur
            if rev:
                tgt = scmutil.revsingle(repo, rev).node()
            checkconflict(repo, mark, force, tgt)
            marks[mark] = tgt
        if not inactive and cur == marks[newact] and not rev:
            bookmarks.setcurrent(repo, newact)
        elif cur != tgt and newact == repo._bookmarkcurrent:
            bookmarks.setcurrent(repo, None)
        marks.write()

    # Same message whether trying to deactivate the current bookmark (-i
    # with no NAME) or listing bookmarks
    elif len(marks) == 0:
        ui.status(_("no bookmarks set\n"))

    elif inactive:
        if not repo._bookmarkcurrent:
            ui.status(_("no active bookmark\n"))
        else:
            bookmarks.setcurrent(repo, None)

    else: # show bookmarks
        for bmark, n in sorted(marks.iteritems()):
            current = repo._bookmarkcurrent
            if bmark == current:
                prefix, label = '*', 'bookmarks.current'
            else:
                prefix, label = ' ', ''

            if ui.quiet:
                ui.write("%s\n" % bmark, label=label)
            else:
                ui.write(" %s %-25s %d:%s\n" % (
                    prefix, bmark, repo.changelog.rev(n), hexfn(n)),
                    label=label)
@command('branch',
    [('f', 'force', None,
     _('set branch name even if it shadows an existing branch')),
    ('C', 'clean', None, _('reset branch name to parent branch name'))],
    _('[-fC] [NAME]'))
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    .. note::
       Branch names are permanent and global. Use :hg:`bookmark` to create a
       light-weight bookmark instead. See :hg:`help glossary` for more
       information about named branches and bookmarks.

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.

    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists, even if it's inactive.

    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.

    Use the command :hg:`update` to switch to an existing branch. Use
    :hg:`commit --close-branch` to mark this branch as closed.

    Returns 0 on success.
    """
    if label:
        label = label.strip()
    clean = opts.get('clean')

    # neither a new name nor --clean: just report the current branch
    if not (clean or label):
        ui.write("%s\n" % repo.dirstate.branch())
        return

    wlock = repo.wlock()
    try:
        if clean:
            # revert the dirstate branch to the first parent's branch
            label = repo[None].p1().branch()
            repo.dirstate.setbranch(label)
            ui.status(_('reset working directory to branch %s\n') % label)
        elif label:
            # refuse to shadow an existing branch unless --force, but
            # allow re-setting the branch of a working-dir parent
            shadows = (not opts.get('force')
                       and label in repo.branchmap()
                       and label not in [p.branch() for p in repo.parents()])
            if shadows:
                raise util.Abort(_('a branch of the same name already'
                                   ' exists'),
                                 # i18n: "it" refers to an existing branch
                                 hint=_("use 'hg update' to switch to it"))
            scmutil.checknewlabel(repo, label, 'branch')
            repo.dirstate.setbranch(label)
            ui.status(_('marked working directory as branch %s\n') % label)
            ui.status(_('(branches are permanent and global, '
                        'did you want a bookmark?)\n'))
    finally:
        wlock.release()
985 985
@command('branches',
    [('a', 'active', False, _('show only branches that have unmerged heads')),
    ('c', 'closed', False, _('show normal and closed branches'))],
    _('[-ac]'))
def branches(ui, repo, active=False, closed=False):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see :hg:`commit --close-branch`).

    If -a/--active is specified, only show active branches. A branch
    is considered active if it contains repository heads.

    Use the command :hg:`update` to switch to an existing branch.

    Returns 0.
    """

    hexfunc = ui.debugflag and hex or short

    activebranches = set([repo[n].branch() for n in repo.heads()])
    branches = []
    for tag, heads in repo.branchmap().iteritems():
        # pick the tipmost open head as the branch tip; when every head
        # is closed, fall back to the tipmost head and mark it closed
        tip = None
        isopen = False
        for h in reversed(heads):
            ctx = repo[h]
            isopen = not ctx.closesbranch()
            if isopen:
                tip = ctx
                break
        if tip is None:
            tip = repo[heads[-1]]
        isactive = tag in activebranches and isopen
        branches.append((tip, isactive, isopen))

    # active branches first, then by descending revision
    def sortkey(entry):
        tip, isactive, isopen = entry
        return (isactive, tip.rev(), tip.branch(), isopen)
    branches.sort(key=sortkey, reverse=True)

    for ctx, isactive, isopen in branches:
        if active and not isactive:
            continue
        if isactive:
            label = 'branches.active'
            notice = ''
        elif not isopen:
            if not closed:
                continue
            label = 'branches.closed'
            notice = _(' (closed)')
        else:
            label = 'branches.inactive'
            notice = _(' (inactive)')
        if ctx.branch() == repo.dirstate.branch():
            label = 'branches.current'
        rev = str(ctx.rev()).rjust(31 - encoding.colwidth(ctx.branch()))
        rev = ui.label('%s:%s' % (rev, hexfunc(ctx.node())),
                       'log.changeset changeset.%s' % ctx.phasestr())
        tag = ui.label(ctx.branch(), label)
        if ui.quiet:
            ui.write("%s\n" % tag)
        else:
            ui.write("%s %s%s\n" % (tag, rev, notice))
1046 1046
@command('bundle',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [], _('a changeset intended to be added to the destination'),
     _('REV')),
    ('b', 'branch', [], _('a specific branch you would like to bundle'),
     _('BRANCH')),
    ('', 'base', [],
     _('a base changeset assumed to be available at the destination'),
     _('REV')),
    ('a', 'all', None, _('bundle all changesets in the repository')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
    ] + remoteopts,
    _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.

    If you omit the destination repository, then hg assumes the
    destination will have all the nodes you specify with --base
    parameters. To create a bundle containing all changesets, use
    -a/--all (or --base null).

    You can change compression method with the -t/--type option.
    The available compression methods are: none, bzip2, and
    gzip (by default, bundles are compressed using bzip2).

    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.

    Returns 0 on success, 1 if no changes found.
    """
    revs = None
    if 'rev' in opts:
        revs = scmutil.revrange(repo, opts['rev'])

    # map the user-facing type name to the internal bundle header
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))

    if opts.get('all'):
        base = ['null']
    else:
        base = scmutil.revrange(repo, opts.get('base'))
    # TODO: get desired bundlecaps from command line.
    bundlecaps = None
    if base:
        # --base given: trust the user about what the destination has,
        # no remote discovery is performed
        if dest:
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        common = [repo.lookup(rev) for rev in base]
        # revs may be an empty list; keep it (falsy) rather than mapping it
        heads = revs and map(repo.lookup, revs) or revs
        cg = repo.getbundle('bundle', heads=heads, common=common,
                            bundlecaps=bundlecaps)
        outgoing = None
    else:
        # no --base: discover what the destination is missing
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.peer(repo, opts, dest)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
        heads = revs and map(repo.lookup, revs) or revs
        outgoing = discovery.findcommonoutgoing(repo, other,
                                                onlyheads=heads,
                                                force=opts.get('force'),
                                                portable=True)
        cg = repo.getlocalbundle('bundle', outgoing, bundlecaps)
    if not cg:
        scmutil.nochangesfound(ui, repo, outgoing and outgoing.excluded)
        return 1

    changegroup.writebundle(cg, fname, bundletype)
1123 1126
@command('cat',
    [('o', 'output', '',
     _('print output to file with formatted name'), _('FORMAT')),
    ('r', 'rev', '', _('print the given revision'), _('REV')),
    ('', 'decode', None, _('apply any matching decode filter')),
    ] + walkopts,
    _('[OPTION]... FILE...'))
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    :``%s``: basename of file being printed
    :``%d``: dirname of file being printed, or '.' if in repository root
    :``%p``: root-relative path name of file being printed

    Returns 0 on success.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    # remains 1 (failure) when no file matched
    err = 1
    for path in ctx.walk(matcher):
        out = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                  pathname=path)
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        out.write(data)
        out.close()
        err = 0
    return err
1161 1164
@command('^clone',
    [('U', 'noupdate', None,
     _('the clone will include an empty working copy (only a repository)')),
    ('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')),
    ('r', 'rev', [], _('include the specified changeset'), _('REV')),
    ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
    ('', 'pull', None, _('use pull protocol to copy metadata')),
    ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
    ] + remoteopts,
    _('[OPTION]... SOURCE [DEST]'))
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    ``.hg/hgrc`` file, as the default to be used for future pulls.

    Only local paths and ``ssh://`` URLs are supported as
    destinations. For ``ssh://`` destinations, no working directory or
    ``.hg/hgrc`` will be created on the remote side.

    To pull only a subset of changesets, specify one or more revisions
    identifiers with -r/--rev or branches with -b/--branch. The
    resulting clone will contain only the specified changesets and
    their ancestors. These options (or 'clone src#rev dest') imply
    --pull, even for local source repositories. Note that specifying a
    tag will include the tagged changeset but not the changeset
    containing the tag.

    If the source repository has a bookmark called '@' set, that
    revision will be checked out in the new repository by default.

    To check out a particular version, use -u/--update, or
    -U/--noupdate to create a clone with no working directory.

    .. container:: verbose

      For efficiency, hardlinks are used for cloning whenever the
      source and destination are on the same filesystem (note this
      applies only to the repository data, not to the working
      directory). Some filesystems, such as AFS, implement hardlinking
      incorrectly, but do not report errors. In these cases, use the
      --pull option to avoid hardlinking.

      In some cases, you can clone repositories and the working
      directory using full hardlinks with ::

        $ cp -al REPO REPOCLONE

      This is the fastest way to clone, but it is not always safe. The
      operation is not atomic (making sure REPO is not modified during
      the operation is up to you) and you have to make sure your
      editor breaks hardlinks (Emacs and most Linux Kernel tools do
      so). Also, this is not compatible with certain extensions that
      place their metadata under the .hg directory, such as mq.

      Mercurial will update the working directory to the first applicable
      revision from this list:

      a) null if -U or the source repository has no changesets
      b) if -u . and the source repository is local, the first parent of
         the source repository's working directory
      c) the changeset specified with -u (if a branch name, this means the
         latest head of that branch)
      d) the changeset specified with -r
      e) the tipmost head specified with -b
      f) the tipmost head specified with the url#branch source syntax
      g) the revision marked with the '@' bookmark, if present
      h) the tipmost head of the default branch
      i) tip

      Examples:

      - clone a remote repository to a new directory named hg/::

          hg clone http://selenic.com/hg

      - create a lightweight local clone::

          hg clone project/ project-feature/

      - clone from an absolute path on an ssh server (note double-slash)::

          hg clone ssh://user@server//home/projects/alpha/

      - do a high-speed clone over a LAN while checking out a
        specified version::

          hg clone --uncompressed http://server/repo -u 1.5

      - create a repository without changesets after a particular revision::

          hg clone -r 04e544 experimental/ good/

      - clone (and track) a particular named branch::

          hg clone http://selenic.com/hg#stable

    See :hg:`help urls` for details on specifying URLs.

    Returns 0 on success.
    """
    noupdate = opts.get('noupdate')
    updaterev = opts.get('updaterev')
    if noupdate and updaterev:
        raise util.Abort(_("cannot specify both --noupdate and --updaterev"))

    # hg.clone returns None on failure; -U means no working-copy update
    result = hg.clone(ui, opts, source, dest,
                      pull=opts.get('pull'),
                      stream=opts.get('uncompressed'),
                      rev=opts.get('rev'),
                      update=updaterev or not noupdate,
                      branch=opts.get('branch'))

    return result is None
1279 1282
1280 1283 @command('^commit|ci',
1281 1284 [('A', 'addremove', None,
1282 1285 _('mark new/missing files as added/removed before committing')),
1283 1286 ('', 'close-branch', None,
1284 1287 _('mark a branch as closed, hiding it from the branch list')),
1285 1288 ('', 'amend', None, _('amend the parent of the working dir')),
1286 1289 ] + walkopts + commitopts + commitopts2 + subrepoopts,
1287 1290 _('[OPTION]... [FILE]...'))
1288 1291 def commit(ui, repo, *pats, **opts):
1289 1292 """commit the specified files or all outstanding changes
1290 1293
1291 1294 Commit changes to the given files into the repository. Unlike a
1292 1295 centralized SCM, this operation is a local operation. See
1293 1296 :hg:`push` for a way to actively distribute your changes.
1294 1297
1295 1298 If a list of files is omitted, all changes reported by :hg:`status`
1296 1299 will be committed.
1297 1300
1298 1301 If you are committing the result of a merge, do not provide any
1299 1302 filenames or -I/-X filters.
1300 1303
1301 1304 If no commit message is specified, Mercurial starts your
1302 1305 configured editor where you can enter a message. In case your
1303 1306 commit fails, you will find a backup of your message in
1304 1307 ``.hg/last-message.txt``.
1305 1308
1306 1309 The --amend flag can be used to amend the parent of the
1307 1310 working directory with a new commit that contains the changes
1308 1311 in the parent in addition to those currently reported by :hg:`status`,
1309 1312 if there are any. The old commit is stored in a backup bundle in
1310 1313 ``.hg/strip-backup`` (see :hg:`help bundle` and :hg:`help unbundle`
1311 1314 on how to restore it).
1312 1315
1313 1316 Message, user and date are taken from the amended commit unless
1314 1317 specified. When a message isn't specified on the command line,
1315 1318 the editor will open with the message of the amended commit.
1316 1319
1317 1320 It is not possible to amend public changesets (see :hg:`help phases`)
1318 1321 or changesets that have children.
1319 1322
1320 1323 See :hg:`help dates` for a list of formats valid for -d/--date.
1321 1324
1322 1325 Returns 0 on success, 1 if nothing changed.
1323 1326 """
1324 1327 if opts.get('subrepos'):
1325 1328 # Let --subrepos on the command line override config setting.
1326 1329 ui.setconfig('ui', 'commitsubrepos', True)
1327 1330
1328 1331 extra = {}
1329 1332 if opts.get('close_branch'):
1330 1333 extra['close'] = 1
1331 1334
1332 1335 branch = repo[None].branch()
1333 1336 bheads = repo.branchheads(branch)
1334 1337
1335 1338 if opts.get('amend'):
1336 1339 if ui.configbool('ui', 'commitsubrepos'):
1337 1340 raise util.Abort(_('cannot amend recursively'))
1338 1341
1339 1342 old = repo['.']
1340 1343 if old.phase() == phases.public:
1341 1344 raise util.Abort(_('cannot amend public changesets'))
1342 1345 if len(repo[None].parents()) > 1:
1343 1346 raise util.Abort(_('cannot amend while merging'))
1344 1347 if (not obsolete._enabled) and old.children():
1345 1348 raise util.Abort(_('cannot amend changeset with children'))
1346 1349
1347 1350 e = cmdutil.commiteditor
1348 1351 if opts.get('force_editor'):
1349 1352 e = cmdutil.commitforceeditor
1350 1353
1351 1354 def commitfunc(ui, repo, message, match, opts):
1352 1355 editor = e
1353 1356 # message contains text from -m or -l, if it's empty,
1354 1357 # open the editor with the old message
1355 1358 if not message:
1356 1359 message = old.description()
1357 1360 editor = cmdutil.commitforceeditor
1358 1361 return repo.commit(message,
1359 1362 opts.get('user') or old.user(),
1360 1363 opts.get('date') or old.date(),
1361 1364 match,
1362 1365 editor=editor,
1363 1366 extra=extra)
1364 1367
1365 1368 current = repo._bookmarkcurrent
1366 1369 marks = old.bookmarks()
1367 1370 node = cmdutil.amend(ui, repo, commitfunc, old, extra, pats, opts)
1368 1371 if node == old.node():
1369 1372 ui.status(_("nothing changed\n"))
1370 1373 return 1
1371 1374 elif marks:
1372 1375 ui.debug('moving bookmarks %r from %s to %s\n' %
1373 1376 (marks, old.hex(), hex(node)))
1374 1377 newmarks = repo._bookmarks
1375 1378 for bm in marks:
1376 1379 newmarks[bm] = node
1377 1380 if bm == current:
1378 1381 bookmarks.setcurrent(repo, bm)
1379 1382 newmarks.write()
1380 1383 else:
1381 1384 e = cmdutil.commiteditor
1382 1385 if opts.get('force_editor'):
1383 1386 e = cmdutil.commitforceeditor
1384 1387
1385 1388 def commitfunc(ui, repo, message, match, opts):
1386 1389 return repo.commit(message, opts.get('user'), opts.get('date'),
1387 1390 match, editor=e, extra=extra)
1388 1391
1389 1392 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
1390 1393
1391 1394 if not node:
1392 1395 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
1393 1396 if stat[3]:
1394 1397 ui.status(_("nothing changed (%d missing files, see "
1395 1398 "'hg status')\n") % len(stat[3]))
1396 1399 else:
1397 1400 ui.status(_("nothing changed\n"))
1398 1401 return 1
1399 1402
1400 1403 cmdutil.commitstatus(repo, node, branch, bheads, opts)
1401 1404
@command('copy|cp',
    [('A', 'after', None, _('record a copy that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [SOURCE]... DEST'))
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect with the next commit. To undo a copy
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # the actual work happens in cmdutil.copy; we only take the
    # working-directory lock around it
    wl = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        wl.release()
1428 1431
@command('debugancestor', [], _('[INDEX] REV1 REV2'))
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs not in (2, 3):
        raise util.Abort(_('either two or three arguments required'))
    if nargs == 3:
        # an explicit index file was given: open it directly, no repo needed
        index, rev1, rev2 = args
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
        lookup = r.lookup
    else:
        # two args: use the changelog of the current repository
        if not repo:
            raise util.Abort(_("there is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    ancestor = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (r.rev(ancestor), hex(ancestor)))
1447 1450
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    # the DAG text may come from the command line or from stdin
    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # refuse to run on a repository that already has history
    cl = repo.changelog
    if len(cl) > 0:
        raise util.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    # accumulates "hexnode name" lines to be written to .hg/localtags
    tags = []

    # all commits are created inside one lock and one transaction
    lock = tr = None
    try:
        lock = repo.lock()
        tr = repo.transaction("builddag")

        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        # second pass: actually create a commit for every 'n' event
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % str(data)))
                id, ps = data

                files = []
                fctxs = {}

                p2 = None
                if mergeable_file:
                    # "mf" carries content that three-way merges cleanly:
                    # each rev only appends " rN" to its own slice of lines
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, mergedtext)

                if overwritten_file:
                    # "of" is rewritten wholesale by every revision
                    fn = "of"
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)

                if new_file:
                    # a fresh "nfN" file per revision; on merges, carry the
                    # other parent's nf* files over as well
                    fn = "nf%i" % id
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                fctxs[fn] = p2[fn]

                def fctxfn(repo, cx, path):
                    return fctxs.get(path)

                # map backref parent ids onto the nodes created so far
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # ":tag" event: remember a local tag for the given rev
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # "@branch" event: switch branch for subsequent commits
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.opener.write("localtags", "".join(tags))
    finally:
        # clear the progress bar and release tr/lock even on failure
        ui.progress(_('building'), None)
        release(tr, lock)
1598 1601
@command('debugbundle', [('a', 'all', None, _('show all details'))], _('FILE'))
def debugbundle(ui, bundlepath, all=None, **opts):
    """lists the contents of a bundle"""
    f = hg.openpath(ui, bundlepath)
    try:
        gen = changegroup.readbundle(f, bundlepath)
        if all:
            # verbose mode: dump every delta chunk of every group
            ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))

            def showchunks(named):
                # walk one delta group; the delta base chains from the
                # previously seen node unless the chunk overrides it
                ui.write("\n%s\n" % named)
                chain = None
                while True:
                    chunkdata = gen.deltachunk(chain)
                    if not chunkdata:
                        break
                    node = chunkdata['node']
                    p1 = chunkdata['p1']
                    p2 = chunkdata['p2']
                    cs = chunkdata['cs']
                    deltabase = chunkdata['deltabase']
                    delta = chunkdata['delta']
                    ui.write("%s %s %s %s %s %s\n" %
                             (hex(node), hex(p1), hex(p2),
                              hex(cs), hex(deltabase), len(delta)))
                    chain = node

            # bundle layout: changelog group, manifest group, then one
            # group per file
            chunkdata = gen.changelogheader()
            showchunks("changelog")
            chunkdata = gen.manifestheader()
            showchunks("manifest")
            while True:
                chunkdata = gen.filelogheader()
                if not chunkdata:
                    break
                fname = chunkdata['filename']
                showchunks(fname)
        else:
            # default mode: only list the changeset node ids
            chunkdata = gen.changelogheader()
            chain = None
            while True:
                chunkdata = gen.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                ui.write("%s\n" % hex(node))
                chain = node
    finally:
        f.close()
1648 1651
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    p1, p2 = repo.dirstate.parents()
    m1 = repo[p1].manifest()
    m2 = repo[p2].manifest()
    problems = 0
    # every tracked dirstate entry must agree with the parent manifests
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if f not in m1 and state in "nr":
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            problems += 1
        if f in m1 and state in "a":
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            problems += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            problems += 1
    # conversely, every file of the first manifest must be tracked
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            problems += 1
    if problems:
        raise util.Abort(
            _(".hg/dirstate inconsistent with current parent's manifest"))
1676 1679
@command('debugcommands', [], _('[COMMAND]'))
def debugcommands(ui, cmd='', *args):
    """list all available commands and options"""
    # one line per command: "name: short-opt, long-opt, ..."
    for name, entry in sorted(table.iteritems()):
        name = name.split('|')[0].strip('^')
        opts = ', '.join([flag[1] for flag in entry[1]])
        ui.write('%s: %s\n' % (name, opts))
1684 1687
@command('debugcomplete',
    [('o', 'options', None, _('show the command options'))],
    _('[-o] CMD'))
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # -o: list the option names (short and long) instead of commands
        tables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            tables.append(entry[1])
        flags = []
        for t in tables:
            for o in t:
                if "(DEPRECATED)" in o[3]:
                    continue
                if o[0]:
                    flags.append('-%s' % o[0])
                flags.append('--%s' % o[1])
        ui.write("%s\n" % "\n".join(flags))
        return

    # otherwise complete command names matching the given prefix
    cmdlist = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
1711 1714
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'))
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # explicit index file: emit its DAG, labeling the listed revs
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
        revs = set((int(r) for r in revs))
        def events():
            # yields ('n', (rev, parents)) node events and
            # ('l', (rev, label)) label events for dagtextlines()
            for r in rlog:
                yield 'n', (r, list(set(p for p in rlog.parentrevs(r)
                                        if p != -1)))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # no file: walk the changelog of the current repository
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # map rev -> list of tag names, used for 'l' label events
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            # same protocol as above, plus ('a', branchname) annotations
            # whenever the named branch changes (with -b)
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(set(p for p in cl.parentrevs(r)
                                        if p != -1)))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise util.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")
1772 1775
@command('debugdata',
    [('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest'))],
    _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    # with -c/-m the sole positional argument is the revision, not a file
    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    rl = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(rl.revision(rl.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
1788 1791
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'))
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e also tries the extended (more permissive) date formats
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    ui.write(("internal: %s %s\n") % parsed)
    ui.write(("standard: %s\n") % util.datestr(parsed))
    if range:
        matchfn = util.matchdate(range)
        ui.write(("match: %s\n") % matchfn(parsed[0]))
1803 1806
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ] + remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads, remote=remote):
        # run one discovery round and report the common heads
        if opts.get('old'):
            # legacy tree-walking discovery
            if localheads:
                raise util.Abort('cannot use localheads with old style '
                                 'discovery')
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                # prune the common set down to its heads via the DAG
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            # current set-based discovery
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    # NOTE(review): 'serverlog' is read from opts but is not declared in
    # this command's option table above — presumably contributed via
    # remoteopts or an extension; confirm before relying on it
    serverlogs = opts.get('serverlog')
    if serverlogs:
        # replay discovery requests from server wire logs: each line is
        # semicolon-separated, field 1 is the operation name
        for filename in serverlogs:
            logfile = open(filename, 'r')
            try:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
            finally:
                logfile.close()

    else:
        # no logs: run a single round against the given remote
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)
1873 1876
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
    if ui.verbose:
        # in verbose mode, show the parse tree before evaluating
        parsetree = fileset.parse(expr)[0]
        ui.note(parsetree, "\n")

    for fname in fileset.getfileset(ctx, expr):
        ui.write("%s\n" % fname)
1886 1889
@command('debugfsinfo', [], _('[PATH]'))
def debugfsinfo(ui, path = "."):
    """show information detected about current filesystem"""
    # a scratch file in the cwd is needed for the case-sensitivity probe;
    # make sure it is removed even when one of the checks raises, so a
    # failed run does not leave '.debugfsinfo' behind
    util.writefile('.debugfsinfo', '')
    try:
        ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
        ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
        ui.write(('case-sensitive: %s\n') % (util.checkcase('.debugfsinfo')
                                    and 'yes' or 'no'))
    finally:
        os.unlink('.debugfsinfo')
1896 1899
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'))
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('getbundle'):
        raise util.Abort("getbundle() not supported by target repository")
    # convert the hex ids from the command line into binary nodes
    kwargs = {}
    if common:
        kwargs['common'] = [bin(s) for s in common]
    if head:
        kwargs['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    kwargs['bundlecaps'] = None
    bundle = peer.getbundle('debug', **kwargs)

    # map the user-facing compression name onto the on-disk bundle type
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(opts.get('type', 'bzip2').lower())
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))
    changegroup.writebundle(bundle, bundlepath, bundletype)
1924 1929
@command('debugignore', [], '')
def debugignore(ui, repo, *values, **opts):
    """display the combined ignore pattern"""
    # the matcher only grows an 'includepat' attribute once patterns exist
    pat = getattr(repo.dirstate._ignore, 'includepat', None)
    if pat is None:
        raise util.Abort(_("no ignore patterns found"))
    ui.write("%s\n" % pat)
1934 1939
@command('debugindex',
    [('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'))
def debugindex(ui, repo, file_ = None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise util.Abort(_("unknown format %d") % format)

    # with generaldelta the base column shows the delta parent,
    # otherwise the start of the delta chain
    generaldelta = r.version & revlog.REVLOGGENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = ' base'

    if format == 0:
        ui.write(" rev offset length " + basehdr + " linkrev"
                 " nodeid p1 p2\n")
    elif format == 1:
        ui.write(" rev flag offset length"
                 " size " + basehdr + " link p1 p2"
                 " nodeid\n")

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            # parents() can fail on a damaged index; fall back to null
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    short(node), short(pp[0]), short(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], short(node)))
1980 1985
@command('debugindexdot', [], _('FILE'))
def debugindexdot(ui, repo, file_):
    """dump an index DAG as a graphviz dot file"""
    rlog = None
    if repo:
        flog = repo.file(file_)
        if len(flog):
            rlog = flog
    if not rlog:
        # not a tracked file (or no repo): open it as a raw revlog index
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
    ui.write(("digraph G {\n"))
    # one edge per parent; the null second parent is omitted
    for rev in rlog:
        parents = rlog.parents(rlog.node(rev))
        ui.write("\t%d -> %d\n" % (rlog.rev(parents[0]), rev))
        if parents[1] != nullid:
            ui.write("\t%d -> %d\n" % (rlog.rev(parents[1]), rev))
    ui.write("}\n")
1999 2004
@command('debuginstall', [], '')
def debuginstall(ui):
    '''test Mercurial installation

    Returns 0 on success.
    '''

    # NOTE(review): writetemp is not referenced anywhere in this function —
    # looks like a leftover helper; confirm before removing
    def writetemp(contents):
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name

    # count of failed checks; doubles as the return value
    problems = 0

    # encoding
    ui.status(_("checking encoding (%s)...\n") % encoding.encoding)
    try:
        encoding.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1

    # Python lib
    ui.status(_("checking Python lib (%s)...\n")
              % os.path.dirname(os.__file__))

    # compiled modules
    ui.status(_("checking installed modules (%s)...\n")
              % os.path.dirname(__file__))
    try:
        import bdiff, mpatch, base85, osutil
        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1

    # templates
    import templater
    p = templater.templatepath()
    ui.status(_("checking templates (%s)...\n") % ' '.join(p))
    try:
        templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1

    # editor
    ui.status(_("checking commit editor...\n"))
    editor = ui.geteditor()
    # the editor setting may include arguments, so also try its first word
    cmdpath = util.findexe(editor) or util.findexe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your configuration"
                       " file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your configuration"
                       " file)\n"))
        problems += 1

    # check username
    ui.status(_("checking username...\n"))
    try:
        ui.username()
    except util.Abort, e:
        ui.write(" %s\n" % e)
        ui.write(_(" (specify a username in your configuration file)\n"))
        problems += 1

    if not problems:
        ui.status(_("no problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)

    return problems
2083 2088
@command('debugknown', [], _('REPO ID...'))
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise util.Abort("known() not supported by target repository")
    # one '0'/'1' digit per queried node, in input order
    nodes = [bin(s) for s in ids]
    digits = [known and "1" or "0" for known in peer.known(nodes)]
    ui.write("%s\n" % "".join(digits))
2096 2101
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''complete "labels" - tags, open branch names, bookmark names'''

    labels = set(t[0] for t in repo.tagslist())
    labels.update(repo._bookmarks.keys())
    # a branch name counts only while at least one head leaves it open
    for heads in repo.branchmap().itervalues():
        for node in heads:
            ctx = repo[node]
            if not ctx.closesbranch():
                labels.add(ctx.branch())
    prefixes = args or ['']
    matches = set()
    for prefix in prefixes:
        matches.update(l for l in labels if l.startswith(prefix))
    ui.write('\n'.join(sorted(matches)))
    ui.write('\n')
2116 2121
@command('debugobsolete',
    [('', 'flags', 0, _('markers flag')),
    ] + commitopts2,
    _('[OBSOLETED [REPLACEMENT] [REPL... ]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""
    def parsenodeid(s):
        # returns the binary node for a full hex id, aborting otherwise
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise util.Abort('changeset references must be full hexadecimal '
                             'node identifiers')

    if precursor is not None:
        # creation mode: record precursor -> successors with metadata
        metadata = {}
        if 'date' in opts:
            metadata['date'] = opts['date']
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        # marker creation needs the repo lock and a transaction; release
        # both in reverse order even on failure
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                repo.obsstore.create(tr, parsenodeid(precursor), succs,
                                     opts['flags'], metadata)
                tr.close()
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # listing mode: one line per marker:
        # precursor successors... flags {metadata}
        for m in obsolete.allmarkers(repo):
            ui.write(hex(m.precnode()))
            for repl in m.succnodes():
                ui.write(' ')
                ui.write(hex(repl))
            ui.write(' %X ' % m._data[2])
            ui.write('{%s}' % (', '.join('%r: %r' % t for t in
                                         sorted(m.metadata().items()))))
            ui.write('\n')
2165 2170
@command('debugpathcomplete',
    [('f', 'full', None, _('complete an entire path')),
    ('n', 'normal', None, _('show only normal files')),
    ('a', 'added', None, _('show only added files')),
    ('r', 'removed', None, _('show only removed files'))],
    _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # returns (files, dirs) completions for one spec; 'acceptable'
        # is the set of dirstate state characters to include
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(os.getcwd(), path))
        rootdir = repo.root + os.sep
        # anything outside the repository cannot be completed
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # dirstate paths always use '/'; translate on platforms where
        # the os separator differs
        fixpaths = os.sep != '/'
        if fixpaths:
            spec = spec.replace(os.sep, '/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', os.sep)
                if fullpaths:
                    addfile(f)
                    continue
                # otherwise complete only up to the next path separator
                s = f.find(os.sep, speclen)
                if s >= 0:
                    adddir(f[:s + 1])
                else:
                    addfile(f)
        return files, dirs

    # build the accepted state set from the -n/-a/-r flags;
    # with no flag, accept all of normal/added/removed/merged
    acceptable = ''
    if opts['normal']:
        acceptable += 'nm'
    if opts['added']:
        acceptable += 'a'
    if opts['removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    if not files and len(dirs) == 1:
        # force the shell to consider a completion that matches one
        # directory and zero files to be ambiguous
        dirs.add(iter(dirs).next() + '.')
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
2234 2239
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'))
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # no key given: dump every key/value pair in the namespace
        for name, value in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (name.encode('string-escape'),
                                   value.encode('string-escape')))
    else:
        key, old, new = keyinfo
        ok = target.pushkey(namespace, key, old, new)
        ui.status(str(ok) + '\n')
        return not ok
2255 2260
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """compare two revisions using their pvec fingerprints

    Prints each revision's pvec and depth, plus the derived relation:
    '=' equal, '>'/'<' ancestor ordering, '|' unrelated, '?' when no
    relation could be determined.
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    else:
        # bug fix: previously 'rel' was left unbound when none of the
        # comparisons above held, crashing with a NameError below;
        # report an undetermined relation instead
        rel = "?"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
2276 2281
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV'))],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    # resolve the target revision before taking the working-dir lock
    target = scmutil.revsingle(repo, rev)
    wlock = repo.wlock()
    try:
        repo.dirstate.rebuild(target.node(), target.manifest())
    finally:
        wlock.release()
2298 2303
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, (file1,) + pats, opts)
    for path in ctx.walk(m):
        fctx = ctx[path]
        # renamed() yields (source path, source filenode) or a false value
        origin = fctx.filelog().renamed(fctx.filenode())
        rel = m.rel(path)
        if not origin:
            ui.write(_("%s not renamed\n") % rel)
        else:
            ui.write(_("%s renamed from %s:%s\n")
                     % (rel, origin[0], hex(origin[1])))
2315 2320
@command('debugrevlog',
    [('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest')),
     ('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'))
def debugrevlog(ui, repo, file_ = None, **opts):
    """show data and statistics about a revlog"""
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # --dump: print one line of raw index data per revision and exit
        numrevs = len(r)
        ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
                 " rawsize totalsize compression heads\n")
        ts = 0
        heads = set()
        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 delta parent means the revision is a full snapshot
                dbase = rev
            cbase = r.chainbase(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # each revision replaces its parents in the running head set
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            # NOTE(review): 'ts / r.end(rev)' raises ZeroDivisionError if
            # the cumulative compressed size is 0 -- confirm empty/degenerate
            # revlogs never reach this loop
            ui.write("%d %d %d %d %d %d %d %d %d %d %d %d %d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, ts / r.end(rev), len(heads)))
        return 0

    # decode the revlog version word: low 16 bits are the format number,
    # the rest are feature flags
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.REVLOGNGINLINEDATA:
        flags.append('inline')
    if v & revlog.REVLOGGENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    # counters for the statistics report below
    nummerges = 0
    numfull = 0      # revisions stored as full snapshots
    numprev = 0      # deltas against the previous revision (rev - 1)
    nump1 = 0        # deltas against p1 (and prev != p1)
    nump2 = 0        # deltas against p2 (and prev != p2)
    numother = 0
    nump1prev = 0    # deltas against prev where prev == p1
    nump2prev = 0    # deltas against prev where prev == p2
    chainlengths = []

    # [min, max, total] accumulators (totals as longs to avoid overflow)
    datasize = [None, 0, 0L]
    fullsize = [None, 0, 0L]
    deltasize = [None, 0, 0L]

    def addsize(size, l):
        # fold 'size' into a [min, max, total] accumulator in place
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # full snapshot: starts a new delta chain
            chainlengths.append(0)
            numfull += 1
            addsize(size, fullsize)
        else:
            # delta: chain length is one more than its delta parent's
            chainlengths.append(chainlengths[delta] + 1)
            addsize(size, deltasize)
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, deltasize):
        if size[0] is None:
            size[0] = 0

    # NOTE(review): the divisions below assume at least one revision and at
    # least one full snapshot; an empty revlog would raise
    # ZeroDivisionError -- confirm callers guarantee non-empty input
    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    if numrevs - numfull > 0:
        deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    compratio = totalrawsize / totalsize

    # format-string templates: the width is filled in by dfmtstr/pcfmtstr
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # width-padded integer format sized to fit 'max'
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # width-padded integer plus percentage format sized to fit 'max'
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # (value, percent-of-total) pair for the pcfmtstr templates
        return (value, 100 * float(value) / total)

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
2488 2493
@command('debugrevspec', [], ('REVSPEC'))
def debugrevspec(ui, repo, expr):
    """parse and apply a revision specification

    Use --verbose to print the parsed tree before and after aliases
    expansion.
    """
    if ui.verbose:
        # show the raw parse tree first, then the alias-expanded tree
        # only when expansion actually changed something
        parsetree = revset.parse(expr)[0]
        ui.note(revset.prettyformat(parsetree), "\n")
        expandedtree = revset.findaliases(ui, parsetree)
        if expandedtree != parsetree:
            ui.note(revset.prettyformat(expandedtree), "\n")
    func = revset.match(ui, expr)
    for rev in func(repo, range(len(repo))):
        ui.write("%s\n" % rev)
2505 2510
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.

    Returns 0 on success.
    """

    # a missing second revision defaults to the null revision
    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, 'null').node()

    wlock = repo.wlock()
    try:
        repo.setparents(node1, node2)
    finally:
        wlock.release()
2524 2529
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
     ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, nodates=None, datesort=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # each dirstate entry 'ent' is indexed below as: ent[0] state char,
    # ent[1] mode, ent[2] size, ent[3] mtime (-1 means 'unset')
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if showdate:
            if ent[3] == -1:
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
                                               time.localtime(0)))
                timestr = 'unset'
                timestr = (timestr[:locale_len] +
                           ' ' * (locale_len - len(timestr)))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                        time.localtime(ent[3]))
        if ent[1] & 020000:
            # this bit in the stored mode marks a symlink
            mode = 'lnk'
        else:
            # display permission bits with the process umask applied
            mode = '%3o' % (ent[1] & 0777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
2556 2561
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # dump the subrepository state recorded in the given revision
    # (working directory by default)
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2567 2572
@command('debugsuccessorssets',
    [],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = str
    node2str = short
    if ui.debug():
        # in debug mode, show full hex hashes instead of the short forms
        def ctx2str(ctx):
            return ctx.hex()
        node2str = hex
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        # one indented line per successors set, successors space-separated
        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
            if succsset:
                ui.write(' ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(' ')
                    ui.write(node2str(node))
            ui.write('\n')
2621 2626
@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'))
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    m = scmutil.match(repo[None], pats, opts)
    matched = list(repo.walk(m))
    if not matched:
        return
    # optionally normalize separators in the displayed relative path
    normalize = lambda fn: fn
    if ui.configbool('ui', 'slash') and os.sep != '/':
        normalize = lambda fn: util.normpath(fn)
    # column widths sized to the longest absolute and relative paths
    widthabs = max([len(path) for path in matched])
    widthrel = max([len(m.rel(path)) for path in matched])
    fmt = 'f %%-%ds %%-%ds %%s' % (widthabs, widthrel)
    for path in matched:
        line = fmt % (path, normalize(m.rel(path)),
                      m.exact(path) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
2638 2643
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
    ] + remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'))
def debugwireargs(ui, repopath, *vals, **opts):
    peer = hg.peer(ui, opts, repopath)
    # strip the generic remote options; only command-specific ones remain
    for opt in remoteopts:
        del opts[opt[1]]
    # forward only the options that were actually set
    args = dict((key, val) for key, val in opts.iteritems() if val)
    # run twice to check that we don't mess up the stream for the next command
    first = peer.debugwireargs(*vals, **args)
    second = peer.debugwireargs(*vals, **args)
    ui.write("%s\n" % first)
    if first != second:
        ui.warn("%s\n" % second)
2659 2664
@command('^diff',
    [('r', 'rev', [], _('revision'), _('REV')),
     ('c', 'change', '', _('change made by revision'), _('REV'))
    ] + diffopts + diffopts2 + walkopts + subrepoopts,
    _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'))
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    .. note::
        diff may generate unexpected results for merges, as it will
        default to comparing against the working directory's first
        parent changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Alternatively you can specify -c/--change with a revision to see
    the changes in that changeset relative to its first parent.

    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read :hg:`help diffs`.

    .. container:: verbose

      Examples:

      - compare a file in the current working directory to its parent::

          hg diff foo.c

      - compare two historical versions of a directory, with rename info::

          hg diff --git -r 1.0:1.2 lib/

      - get change stats relative to the last change on some date::

          hg diff --stat -r "date('may 2')"

      - diff all newly-added files that contain a keyword::

          hg diff "set:added() and grep(GNU)"

      - compare a revision and its parents::

          hg diff -c 9353 # compare against first parent
          hg diff -r 9353^:9353 # same using revset syntax
          hg diff -r 9353^2:9353 # compare against the second parent

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')
    stat = opts.get('stat')
    reverse = opts.get('reverse')

    if revs and change:
        raise util.Abort(
            _('cannot specify --rev and --change at the same time'))
    if change:
        # -c/--change: diff one changeset against its first parent
        node2 = scmutil.revsingle(repo, change, None).node()
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    if reverse:
        node1, node2 = node2, node1

    diffopts = patch.diffopts(ui, opts)
    m = scmutil.match(repo[node2], pats, opts)
    cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
                           listsubrepos=opts.get('subrepos'))
2743 2748
@command('^export',
    [('o', 'output', '',
      _('print output to file with formatted name'), _('FORMAT')),
     ('', 'switch-parent', None, _('diff against the second parent')),
     ('r', 'rev', [], _('revisions to export'), _('REV')),
    ] + diffopts,
    _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'))
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.
    If no revision is given, the parent of the working directory is used.

    The information shown in the changeset header is: author, date,
    branch name (if non-default), changeset hash, parent(s) and commit
    comment.

    .. note::
       export may generate unexpected diff output for merge
       changesets, as it will compare the merge changeset against its
       first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    :``%%``: literal "%" character
    :``%H``: changeset hash (40 hexadecimal digits)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 hexadecimal digits)
    :``%m``: first line of the commit message (only alphanumeric characters)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number

    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. See :hg:`help diffs` for more information.

    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.

    .. container:: verbose

      Examples:

      - use export and import to transplant a bugfix to the current
        branch::

          hg export -r 9353 | hg import -

      - export all the changesets between two revisions to a file with
        rename information::

          hg export --git -r 123:150 > changes.txt

      - split outgoing changes into a series of patches with
        descriptive names::

          hg export -r "outgoing()" -o "%n-%m.patch"

    Returns 0 on success.
    """
    # positional arguments and -r/--rev are merged into one list;
    # with neither given, default to the working directory's parent
    changesets += tuple(opts.get('rev', []))
    if not changesets:
        changesets = ['.']
    revs = scmutil.revrange(repo, changesets)
    if not revs:
        raise util.Abort(_("export requires at least one changeset"))
    if len(revs) == 1:
        ui.note(_('exporting patch:\n'))
    else:
        ui.note(_('exporting patches:\n'))
    cmdutil.export(repo, revs, template=opts.get('output'),
                   switch_parent=opts.get('switch_parent'),
                   opts=patch.diffopts(ui, opts))
2823 2828
@command('^forget', walkopts, _('[OPTION]... FILE...'))
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit

    Mark the specified files so they will no longer be tracked
    after the next commit.

    This only removes files from the current branch, not from the
    entire project history, and it does not delete them from the
    working directory.

    To undo a forget before the next commit, see :hg:`add`.

    .. container:: verbose

      Examples:

      - forget newly-added binary files::

          hg forget "set:added() and binary()"

      - forget files that would be excluded by .hgignore::

          hg forget "set:hgignore()"

    Returns 0 on success.
    """

    if not pats:
        raise util.Abort(_('no files specified'))

    m = scmutil.match(repo[None], pats, opts)
    rejected = cmdutil.forget(ui, repo, m, prefix="", explicitonly=False)[0]
    # non-zero exit status when any file could not be forgotten
    if rejected:
        return 1
    return 0
2858 2863
@command(
    'graft',
    [('r', 'rev', [], _('revisions to graft'), _('REV')),
     ('c', 'continue', False, _('resume interrupted graft')),
     ('e', 'edit', False, _('invoke editor on commit messages')),
     ('', 'log', None, _('append graft info to log message')),
     ('D', 'currentdate', False,
      _('record the current date as commit date')),
     ('U', 'currentuser', False,
      _('record the current user as committer'), _('DATE'))]
    + commitopts2 + mergetoolopts + dryrunopts,
    _('[OPTION]... [-r] REV...'))
def graft(ui, repo, *revs, **opts):
    '''copy changes from other branches onto the current branch

    This command uses Mercurial's merge logic to copy individual
    changes from other branches without merging branches in the
    history graph. This is sometimes known as 'backporting' or
    'cherry-picking'. By default, graft will copy user, date, and
    description from the source changesets.

    Changesets that are ancestors of the current revision, that have
    already been grafted, or that are merges will be skipped.

    If --log is specified, log messages will have a comment appended
    of the form::

      (grafted from CHANGESETHASH)

    If a graft merge results in conflicts, the graft process is
    interrupted so that the current merge can be manually resolved.
    Once all conflicts are addressed, the graft process can be
    continued with the -c/--continue option.

    .. note::
      The -c/--continue option does not reapply earlier options.

    .. container:: verbose

      Examples:

      - copy a single change to the stable branch and edit its description::

          hg update stable
          hg graft --edit 9393

      - graft a range of changesets with one exception, updating dates::

          hg graft -D "2085::2093 and not 2091"

      - continue a graft after resolving conflicts::

          hg graft -c

      - show the source of a grafted changeset::

          hg log --debug -r tip

    Returns 0 on successful completion.
    '''

    # positional revisions and -r/--rev are treated alike
    revs = list(revs)
    revs.extend(opts['rev'])

    if not opts.get('user') and opts.get('currentuser'):
        opts['user'] = ui.username()
    if not opts.get('date') and opts.get('currentdate'):
        opts['date'] = "%d %d" % util.makedate()

    editor = None
    if opts.get('edit'):
        editor = cmdutil.commitforceeditor

    cont = False
    if opts['continue']:
        cont = True
        if revs:
            raise util.Abort(_("can't specify --continue and revisions"))
        # read in unfinished revisions
        try:
            nodes = repo.opener.read('graftstate').splitlines()
            revs = [repo[node].rev() for node in nodes]
        except IOError, inst:
            # a missing graftstate file means there is nothing to continue;
            # any other I/O error is propagated
            if inst.errno != errno.ENOENT:
                raise
            raise util.Abort(_("no graft state found, can't continue"))
    else:
        cmdutil.bailifchanged(repo)
        if not revs:
            raise util.Abort(_('no revisions specified'))
        revs = scmutil.revrange(repo, revs)

    # check for merges
    for rev in repo.revs('%ld and merge()', revs):
        ui.warn(_('skipping ungraftable merge revision %s\n') % rev)
        revs.remove(rev)
    if not revs:
        return -1

    # check for ancestors of dest branch
    crev = repo['.'].rev()
    ancestors = repo.changelog.ancestors([crev], inclusive=True)
    # don't mutate while iterating, create a copy
    for rev in list(revs):
        if rev in ancestors:
            ui.warn(_('skipping ancestor revision %s\n') % rev)
            revs.remove(rev)
    if not revs:
        return -1

    # analyze revs for earlier grafts
    # ids maps a changeset hex (or its recorded graft source) to its rev,
    # so previously grafted copies of the requested revisions can be found
    ids = {}
    for ctx in repo.set("%ld", revs):
        ids[ctx.hex()] = ctx.rev()
        n = ctx.extra().get('source')
        if n:
            ids[n] = ctx.rev()

    # check ancestors for earlier grafts
    ui.debug('scanning for duplicate grafts\n')

    for rev in repo.changelog.findmissingrevs(revs, [crev]):
        ctx = repo[rev]
        n = ctx.extra().get('source')
        if n in ids:
            r = repo[n].rev()
            if r in revs:
                ui.warn(_('skipping already grafted revision %s\n') % r)
                revs.remove(r)
            elif ids[n] in revs:
                ui.warn(_('skipping already grafted revision %s '
                          '(same origin %d)\n') % (ids[n], r))
                revs.remove(ids[n])
        elif ctx.hex() in ids:
            r = ids[ctx.hex()]
            ui.warn(_('skipping already grafted revision %s '
                      '(was grafted from %d)\n') % (r, rev))
            revs.remove(r)
    if not revs:
        return -1

    wlock = repo.wlock()
    try:
        current = repo['.']
        for pos, ctx in enumerate(repo.set("%ld", revs)):

            ui.status(_('grafting revision %s\n') % ctx.rev())
            if opts.get('dry_run'):
                continue

            # record the original changeset as 'source' so later graft
            # runs can detect duplicates (see the scan above)
            source = ctx.extra().get('source')
            if not source:
                source = ctx.hex()
            extra = {'source': source}
            user = ctx.user()
            if opts.get('user'):
                user = opts['user']
            date = ctx.date()
            if opts.get('date'):
                date = opts['date']
            message = ctx.description()
            if opts.get('log'):
                message += '\n(grafted from %s)' % ctx.hex()

            # we don't merge the first commit when continuing
            if not cont:
                # perform the graft merge with p1(rev) as 'ancestor'
                try:
                    # ui.forcemerge is an internal variable, do not document
                    repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                    stats = mergemod.update(repo, ctx.node(), True, True, False,
                                            ctx.p1().node())
                finally:
                    repo.ui.setconfig('ui', 'forcemerge', '')
                # report any conflicts
                if stats and stats[3] > 0:
                    # write out state for --continue
                    nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]]
                    repo.opener.write('graftstate', ''.join(nodelines))
                    raise util.Abort(
                        _("unresolved conflicts, can't continue"),
                        hint=_('use hg resolve and hg graft --continue'))
            else:
                cont = False

            # drop the second merge parent
            repo.setparents(current.node(), nullid)
            repo.dirstate.write()
            # fix up dirstate for copies and renames
            cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())

            # commit
            node = repo.commit(text=message, user=user,
                               date=date, extra=extra, editor=editor)
            if node is None:
                ui.status(_('graft for revision %s is empty\n') % ctx.rev())
            else:
                current = repo[node]
    finally:
        wlock.release()

    # remove state when we complete successfully
    if not opts.get('dry_run'):
        util.unlinkpath(repo.join('graftstate'), ignoremissing=True)

    return 0
3065 3070
3066 3071 @command('grep',
3067 3072 [('0', 'print0', None, _('end fields with NUL')),
3068 3073 ('', 'all', None, _('print all revisions that match')),
3069 3074 ('a', 'text', None, _('treat all files as text')),
3070 3075 ('f', 'follow', None,
3071 3076 _('follow changeset history,'
3072 3077 ' or file history across copies and renames')),
3073 3078 ('i', 'ignore-case', None, _('ignore case when matching')),
3074 3079 ('l', 'files-with-matches', None,
3075 3080 _('print only filenames and revisions that match')),
3076 3081 ('n', 'line-number', None, _('print matching line numbers')),
3077 3082 ('r', 'rev', [],
3078 3083 _('only search files changed within revision range'), _('REV')),
3079 3084 ('u', 'user', None, _('list the author (long with -v)')),
3080 3085 ('d', 'date', None, _('list the date (short with -q)')),
3081 3086 ] + walkopts,
3082 3087 _('[OPTION]... PATTERN [FILE]...'))
3083 3088 def grep(ui, repo, pattern, *pats, **opts):
3084 3089 """search for a pattern in specified files and revisions
3085 3090
3086 3091 Search revisions of files for a regular expression.
3087 3092
3088 3093 This command behaves differently than Unix grep. It only accepts
3089 3094 Python/Perl regexps. It searches repository history, not the
3090 3095 working directory. It always prints the revision number in which a
3091 3096 match appears.
3092 3097
3093 3098 By default, grep only prints output for the first revision of a
3094 3099 file in which it finds a match. To get it to print every revision
3095 3100 that contains a change in match status ("-" for a match that
3096 3101 becomes a non-match, or "+" for a non-match that becomes a match),
3097 3102 use the --all flag.
3098 3103
3099 3104 Returns 0 if a match is found, 1 otherwise.
3100 3105 """
3101 3106 reflags = re.M
3102 3107 if opts.get('ignore_case'):
3103 3108 reflags |= re.I
3104 3109 try:
3105 3110 regexp = util.compilere(pattern, reflags)
3106 3111 except re.error, inst:
3107 3112 ui.warn(_("grep: invalid match pattern: %s\n") % inst)
3108 3113 return 1
3109 3114 sep, eol = ':', '\n'
3110 3115 if opts.get('print0'):
3111 3116 sep = eol = '\0'
3112 3117
3113 3118 getfile = util.lrucachefunc(repo.file)
3114 3119
3115 3120 def matchlines(body):
3116 3121 begin = 0
3117 3122 linenum = 0
3118 3123 while begin < len(body):
3119 3124 match = regexp.search(body, begin)
3120 3125 if not match:
3121 3126 break
3122 3127 mstart, mend = match.span()
3123 3128 linenum += body.count('\n', begin, mstart) + 1
3124 3129 lstart = body.rfind('\n', begin, mstart) + 1 or begin
3125 3130 begin = body.find('\n', mend) + 1 or len(body) + 1
3126 3131 lend = begin - 1
3127 3132 yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
3128 3133
3129 3134 class linestate(object):
3130 3135 def __init__(self, line, linenum, colstart, colend):
3131 3136 self.line = line
3132 3137 self.linenum = linenum
3133 3138 self.colstart = colstart
3134 3139 self.colend = colend
3135 3140
3136 3141 def __hash__(self):
3137 3142 return hash((self.linenum, self.line))
3138 3143
3139 3144 def __eq__(self, other):
3140 3145 return self.line == other.line
3141 3146
3142 3147 matches = {}
3143 3148 copies = {}
3144 3149 def grepbody(fn, rev, body):
3145 3150 matches[rev].setdefault(fn, [])
3146 3151 m = matches[rev][fn]
3147 3152 for lnum, cstart, cend, line in matchlines(body):
3148 3153 s = linestate(line, lnum, cstart, cend)
3149 3154 m.append(s)
3150 3155
3151 3156 def difflinestates(a, b):
3152 3157 sm = difflib.SequenceMatcher(None, a, b)
3153 3158 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3154 3159 if tag == 'insert':
3155 3160 for i in xrange(blo, bhi):
3156 3161 yield ('+', b[i])
3157 3162 elif tag == 'delete':
3158 3163 for i in xrange(alo, ahi):
3159 3164 yield ('-', a[i])
3160 3165 elif tag == 'replace':
3161 3166 for i in xrange(alo, ahi):
3162 3167 yield ('-', a[i])
3163 3168 for i in xrange(blo, bhi):
3164 3169 yield ('+', b[i])
3165 3170
3166 3171 def display(fn, ctx, pstates, states):
3167 3172 rev = ctx.rev()
3168 3173 datefunc = ui.quiet and util.shortdate or util.datestr
3169 3174 found = False
3170 3175 filerevmatches = {}
3171 3176 def binary():
3172 3177 flog = getfile(fn)
3173 3178 return util.binary(flog.read(ctx.filenode(fn)))
3174 3179
3175 3180 if opts.get('all'):
3176 3181 iter = difflinestates(pstates, states)
3177 3182 else:
3178 3183 iter = [('', l) for l in states]
3179 3184 for change, l in iter:
3180 3185 cols = [(fn, 'grep.filename'), (str(rev), 'grep.rev')]
3181 3186 before, match, after = None, None, None
3182 3187
3183 3188 if opts.get('line_number'):
3184 3189 cols.append((str(l.linenum), 'grep.linenumber'))
3185 3190 if opts.get('all'):
3186 3191 cols.append((change, 'grep.change'))
3187 3192 if opts.get('user'):
3188 3193 cols.append((ui.shortuser(ctx.user()), 'grep.user'))
3189 3194 if opts.get('date'):
3190 3195 cols.append((datefunc(ctx.date()), 'grep.date'))
3191 3196 if opts.get('files_with_matches'):
3192 3197 c = (fn, rev)
3193 3198 if c in filerevmatches:
3194 3199 continue
3195 3200 filerevmatches[c] = 1
3196 3201 else:
3197 3202 before = l.line[:l.colstart]
3198 3203 match = l.line[l.colstart:l.colend]
3199 3204 after = l.line[l.colend:]
3200 3205 for col, label in cols[:-1]:
3201 3206 ui.write(col, label=label)
3202 3207 ui.write(sep, label='grep.sep')
3203 3208 ui.write(cols[-1][0], label=cols[-1][1])
3204 3209 if before is not None:
3205 3210 ui.write(sep, label='grep.sep')
3206 3211 if not opts.get('text') and binary():
3207 3212 ui.write(" Binary file matches")
3208 3213 else:
3209 3214 ui.write(before)
3210 3215 ui.write(match, label='grep.match')
3211 3216 ui.write(after)
3212 3217 ui.write(eol)
3213 3218 found = True
3214 3219 return found
3215 3220
3216 3221 skip = {}
3217 3222 revfiles = {}
3218 3223 matchfn = scmutil.match(repo[None], pats, opts)
3219 3224 found = False
3220 3225 follow = opts.get('follow')
3221 3226
3222 3227 def prep(ctx, fns):
3223 3228 rev = ctx.rev()
3224 3229 pctx = ctx.p1()
3225 3230 parent = pctx.rev()
3226 3231 matches.setdefault(rev, {})
3227 3232 matches.setdefault(parent, {})
3228 3233 files = revfiles.setdefault(rev, [])
3229 3234 for fn in fns:
3230 3235 flog = getfile(fn)
3231 3236 try:
3232 3237 fnode = ctx.filenode(fn)
3233 3238 except error.LookupError:
3234 3239 continue
3235 3240
3236 3241 copied = flog.renamed(fnode)
3237 3242 copy = follow and copied and copied[0]
3238 3243 if copy:
3239 3244 copies.setdefault(rev, {})[fn] = copy
3240 3245 if fn in skip:
3241 3246 if copy:
3242 3247 skip[copy] = True
3243 3248 continue
3244 3249 files.append(fn)
3245 3250
3246 3251 if fn not in matches[rev]:
3247 3252 grepbody(fn, rev, flog.read(fnode))
3248 3253
3249 3254 pfn = copy or fn
3250 3255 if pfn not in matches[parent]:
3251 3256 try:
3252 3257 fnode = pctx.filenode(pfn)
3253 3258 grepbody(pfn, parent, flog.read(fnode))
3254 3259 except error.LookupError:
3255 3260 pass
3256 3261
3257 3262 for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
3258 3263 rev = ctx.rev()
3259 3264 parent = ctx.p1().rev()
3260 3265 for fn in sorted(revfiles.get(rev, [])):
3261 3266 states = matches[rev][fn]
3262 3267 copy = copies.get(rev, {}).get(fn)
3263 3268 if fn in skip:
3264 3269 if copy:
3265 3270 skip[copy] = True
3266 3271 continue
3267 3272 pstates = matches.get(parent, {}).get(copy or fn, [])
3268 3273 if pstates or states:
3269 3274 r = display(fn, ctx, pstates, states)
3270 3275 found = found or r
3271 3276 if r and not opts.get('all'):
3272 3277 skip[fn] = True
3273 3278 if copy:
3274 3279 skip[copy] = True
3275 3280 del matches[rev]
3276 3281 del revfiles[rev]
3277 3282
3278 3283 return not found
3279 3284
@command('heads',
    [('r', 'rev', '',
     _('show only heads which are descendants of STARTREV'), _('STARTREV')),
    ('t', 'topo', False, _('show topological heads only')),
    ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
    ('c', 'closed', False, _('show normal and closed branch heads')),
    ] + templateopts,
    _('[-ct] [-r STARTREV] [REV]...'))
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads

    With no arguments, show all repository branch heads.

    Repository "heads" are changesets with no child changesets. They are
    where development generally takes place and are the usual targets
    for update and merge operations. Branch heads are changesets that have
    no child changeset on the same branch.

    If one or more REVs are given, only branch heads on the branches
    associated with the specified changesets are shown. This means
    that you can use :hg:`heads foo` to see the heads on a branch
    named ``foo``.

    If -c/--closed is specified, also show branch heads marked closed
    (see :hg:`commit --close-branch`).

    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.

    If -t/--topo is specified, named branch mechanics will be ignored and only
    changesets without children will be shown.

    Returns 0 if matching heads are found, 1 if not.
    """

    start = None
    if 'rev' in opts:
        start = scmutil.revsingle(repo, opts['rev'], None).node()

    if opts.get('topo'):
        # topological heads: ignore named branches entirely
        heads = [repo[h] for h in repo.heads(start)]
    else:
        heads = []
        for branch in repo.branchmap():
            heads += repo.branchheads(branch, start, opts.get('closed'))
        heads = [repo[h] for h in heads]

    if branchrevs:
        branches = set(repo[br].branch() for br in branchrevs)
        heads = [h for h in heads if h.branch() in branches]

    if opts.get('active') and branchrevs:
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]

    if branchrevs:
        # warn about requested branches that yielded no heads
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(b for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)

    if not heads:
        return 1

    # newest first
    heads = sorted(heads, key=lambda x: -x.rev())
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for ctx in heads:
        displayer.show(ctx)
    displayer.close()
3352 3357
@command('help',
    [('e', 'extension', None, _('show only help for extensions')),
    ('c', 'command', None, _('show only help for commands')),
    ('k', 'keyword', '', _('show topics matching keyword')),
    ],
    _('[-ec] [TOPIC]'))
def help_(ui, name=None, **opts):
    """show help for a given topic or a help overview

    With no arguments, print a list of commands with short help messages.

    Given a topic, extension, or command name, print help for that
    topic.

    Returns 0 if successful.
    """

    textwidth = min(ui.termwidth(), 80) - 2

    keep = ui.verbose and ['verbose'] or []
    text = help.help_(ui, name, **opts)

    formatted, pruned = minirst.format(text, textwidth, keep=keep)
    if 'verbose' in pruned:
        # verbose-only sections were dropped: reformat once more with a
        # marker telling the reader that content was omitted (or not)
        keep.append('omitted')
    else:
        keep.append('notomitted')
    formatted, pruned = minirst.format(text, textwidth, keep=keep)
    ui.write(formatted)
3382 3387
3383 3388
@command('identify|id',
    [('r', 'rev', '',
     _('identify the specified revision'), _('REV')),
    ('n', 'num', None, _('show local revision number')),
    ('i', 'id', None, _('show global revision id')),
    ('b', 'branch', None, _('show branch')),
    ('t', 'tags', None, _('show tags')),
    ('B', 'bookmarks', None, _('show bookmarks')),
    ] + remoteopts,
    _('[-nibtB] [-r REV] [SOURCE]'))
def identify(ui, repo, source=None, rev=None,
             num=None, id=None, branch=None, tags=None, bookmarks=None, **opts):
    """identify the working copy or specified revision

    Print a summary identifying the repository state at REV using one or
    two parent hash identifiers, followed by a "+" if the working
    directory has uncommitted changes, the branch name (if not default),
    a list of tags, and a list of bookmarks.

    When REV is not given, print a summary of the current state of the
    repository.

    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.

    .. container:: verbose

      Examples:

      - generate a build identifier for the working directory::

          hg id --id > build-id.dat

      - find the revision corresponding to a tag::

          hg id -n -r 1.3

      - check the most recent revision of a remote repository::

          hg id -r tip http://selenic.com/hg/

    Returns 0 if successful.
    """

    if not repo and not source:
        raise util.Abort(_("there is no Mercurial repository here "
                           "(.hg not found)"))

    hexfunc = ui.debugflag and hex or short
    default = not (num or id or branch or tags or bookmarks)
    output = []
    revs = []

    if source:
        source, branches = hg.parseurl(ui.expandpath(source))
        peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
        repo = peer.local()
        revs, checkout = hg.addbranchrevs(repo, peer, branches, None)

    if not repo:
        # remote peer only: we can resolve an id but nothing requiring
        # local repository state
        if num or branch or tags:
            raise util.Abort(
                _("can't query remote revision number, branch, or tags"))
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"

        remoterev = peer.lookup(rev)
        if default or id:
            output = [hexfunc(remoterev)]

        def getbms():
            bms = []

            if 'bookmarks' in peer.listkeys('namespaces'):
                hexremoterev = hex(remoterev)
                bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems()
                       if bmr == hexremoterev]

            return sorted(bms)

        if bookmarks:
            output.extend(getbms())
        elif default and not ui.quiet:
            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(getbms())
            if bm:
                output.append(bm)
    else:
        if not rev:
            # working directory: identify by parents, with '+' when dirty
            ctx = repo[None]
            parents = ctx.parents()
            changed = ""
            if default or id or num:
                if (util.any(repo.status())
                    or util.any(ctx.sub(s).dirty() for s in ctx.substate)):
                    changed = '+'
            if default or id:
                output = ["%s%s" %
                  ('+'.join([hexfunc(p.node()) for p in parents]), changed)]
            if num:
                output.append("%s%s" %
                  ('+'.join([str(p.rev()) for p in parents]), changed))
        else:
            ctx = scmutil.revsingle(repo, rev)
            if default or id:
                output = [hexfunc(ctx.node())]
            if num:
                output.append(str(ctx.rev()))

        if default and not ui.quiet:
            b = ctx.branch()
            if b != 'default':
                output.append("(%s)" % b)

            # multiple tags for a single parent separated by '/'
            t = '/'.join(ctx.tags())
            if t:
                output.append(t)

            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(ctx.bookmarks())
            if bm:
                output.append(bm)
        else:
            if branch:
                output.append(ctx.branch())

            if tags:
                output.extend(ctx.tags())

            if bookmarks:
                output.extend(ctx.bookmarks())

    ui.write("%s\n" % ' '.join(output))
3520 3525
3521 3526 @command('import|patch',
3522 3527 [('p', 'strip', 1,
3523 3528 _('directory strip option for patch. This has the same '
3524 3529 'meaning as the corresponding patch option'), _('NUM')),
3525 3530 ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')),
3526 3531 ('e', 'edit', False, _('invoke editor on commit messages')),
3527 3532 ('f', 'force', None, _('skip check for outstanding uncommitted changes')),
3528 3533 ('', 'no-commit', None,
3529 3534 _("don't commit, just update the working directory")),
3530 3535 ('', 'bypass', None,
3531 3536 _("apply patch without touching the working directory")),
3532 3537 ('', 'exact', None,
3533 3538 _('apply patch to the nodes from which it was generated')),
3534 3539 ('', 'import-branch', None,
3535 3540 _('use any branch information in patch (implied by --exact)'))] +
3536 3541 commitopts + commitopts2 + similarityopts,
3537 3542 _('[OPTION]... PATCH...'))
3538 3543 def import_(ui, repo, patch1=None, *patches, **opts):
3539 3544 """import an ordered set of patches
3540 3545
3541 3546 Import a list of patches and commit them individually (unless
3542 3547 --no-commit is specified).
3543 3548
3544 3549 If there are outstanding changes in the working directory, import
3545 3550 will abort unless given the -f/--force flag.
3546 3551
3547 3552 You can import a patch straight from a mail message. Even patches
3548 3553 as attachments work (to use the body part, it must have type
3549 3554 text/plain or text/x-patch). From and Subject headers of email
3550 3555 message are used as default committer and commit message. All
3551 3556 text/plain body parts before first diff are added to commit
3552 3557 message.
3553 3558
3554 3559 If the imported patch was generated by :hg:`export`, user and
3555 3560 description from patch override values from message headers and
3556 3561 body. Values given on command line with -m/--message and -u/--user
3557 3562 override these.
3558 3563
3559 3564 If --exact is specified, import will set the working directory to
3560 3565 the parent of each patch before applying it, and will abort if the
3561 3566 resulting changeset has a different ID than the one recorded in
3562 3567 the patch. This may happen due to character set problems or other
3563 3568 deficiencies in the text patch format.
3564 3569
3565 3570 Use --bypass to apply and commit patches directly to the
3566 3571 repository, not touching the working directory. Without --exact,
3567 3572 patches will be applied on top of the working directory parent
3568 3573 revision.
3569 3574
3570 3575 With -s/--similarity, hg will attempt to discover renames and
3571 3576 copies in the patch in the same way as :hg:`addremove`.
3572 3577
3573 3578 To read a patch from standard input, use "-" as the patch name. If
3574 3579 a URL is specified, the patch will be downloaded from it.
3575 3580 See :hg:`help dates` for a list of formats valid for -d/--date.
3576 3581
3577 3582 .. container:: verbose
3578 3583
3579 3584 Examples:
3580 3585
3581 3586 - import a traditional patch from a website and detect renames::
3582 3587
3583 3588 hg import -s 80 http://example.com/bugfix.patch
3584 3589
3585 3590 - import a changeset from an hgweb server::
3586 3591
3587 3592 hg import http://www.selenic.com/hg/rev/5ca8c111e9aa
3588 3593
3589 3594 - import all the patches in an Unix-style mbox::
3590 3595
3591 3596 hg import incoming-patches.mbox
3592 3597
3593 3598 - attempt to exactly restore an exported changeset (not always
3594 3599 possible)::
3595 3600
3596 3601 hg import --exact proposed-fix.patch
3597 3602
3598 3603 Returns 0 on success.
3599 3604 """
3600 3605
3601 3606 if not patch1:
3602 3607 raise util.Abort(_('need at least one patch to import'))
3603 3608
3604 3609 patches = (patch1,) + patches
3605 3610
3606 3611 date = opts.get('date')
3607 3612 if date:
3608 3613 opts['date'] = util.parsedate(date)
3609 3614
3610 3615 editor = cmdutil.commiteditor
3611 3616 if opts.get('edit'):
3612 3617 editor = cmdutil.commitforceeditor
3613 3618
3614 3619 update = not opts.get('bypass')
3615 3620 if not update and opts.get('no_commit'):
3616 3621 raise util.Abort(_('cannot use --no-commit with --bypass'))
3617 3622 try:
3618 3623 sim = float(opts.get('similarity') or 0)
3619 3624 except ValueError:
3620 3625 raise util.Abort(_('similarity must be a number'))
3621 3626 if sim < 0 or sim > 100:
3622 3627 raise util.Abort(_('similarity must be between 0 and 100'))
3623 3628 if sim and not update:
3624 3629 raise util.Abort(_('cannot use --similarity with --bypass'))
3625 3630
3626 3631 if (opts.get('exact') or not opts.get('force')) and update:
3627 3632 cmdutil.bailifchanged(repo)
3628 3633
3629 3634 base = opts["base"]
3630 3635 strip = opts["strip"]
3631 3636 wlock = lock = tr = None
3632 3637 msgs = []
3633 3638
3634 3639 def tryone(ui, hunk, parents):
3635 3640 tmpname, message, user, date, branch, nodeid, p1, p2 = \
3636 3641 patch.extract(ui, hunk)
3637 3642
3638 3643 if not tmpname:
3639 3644 return (None, None)
3640 3645 msg = _('applied to working directory')
3641 3646
3642 3647 try:
3643 3648 cmdline_message = cmdutil.logmessage(ui, opts)
3644 3649 if cmdline_message:
3645 3650 # pickup the cmdline msg
3646 3651 message = cmdline_message
3647 3652 elif message:
3648 3653 # pickup the patch msg
3649 3654 message = message.strip()
3650 3655 else:
3651 3656 # launch the editor
3652 3657 message = None
3653 3658 ui.debug('message:\n%s\n' % message)
3654 3659
3655 3660 if len(parents) == 1:
3656 3661 parents.append(repo[nullid])
3657 3662 if opts.get('exact'):
3658 3663 if not nodeid or not p1:
3659 3664 raise util.Abort(_('not a Mercurial patch'))
3660 3665 p1 = repo[p1]
3661 3666 p2 = repo[p2 or nullid]
3662 3667 elif p2:
3663 3668 try:
3664 3669 p1 = repo[p1]
3665 3670 p2 = repo[p2]
3666 3671 # Without any options, consider p2 only if the
3667 3672 # patch is being applied on top of the recorded
3668 3673 # first parent.
3669 3674 if p1 != parents[0]:
3670 3675 p1 = parents[0]
3671 3676 p2 = repo[nullid]
3672 3677 except error.RepoError:
3673 3678 p1, p2 = parents
3674 3679 else:
3675 3680 p1, p2 = parents
3676 3681
3677 3682 n = None
3678 3683 if update:
3679 3684 if p1 != parents[0]:
3680 3685 hg.clean(repo, p1.node())
3681 3686 if p2 != parents[1]:
3682 3687 repo.setparents(p1.node(), p2.node())
3683 3688
3684 3689 if opts.get('exact') or opts.get('import_branch'):
3685 3690 repo.dirstate.setbranch(branch or 'default')
3686 3691
3687 3692 files = set()
3688 3693 patch.patch(ui, repo, tmpname, strip=strip, files=files,
3689 3694 eolmode=None, similarity=sim / 100.0)
3690 3695 files = list(files)
3691 3696 if opts.get('no_commit'):
3692 3697 if message:
3693 3698 msgs.append(message)
3694 3699 else:
3695 3700 if opts.get('exact') or p2:
3696 3701 # If you got here, you either use --force and know what
3697 3702 # you are doing or used --exact or a merge patch while
3698 3703 # being updated to its first parent.
3699 3704 m = None
3700 3705 else:
3701 3706 m = scmutil.matchfiles(repo, files or [])
3702 3707 n = repo.commit(message, opts.get('user') or user,
3703 3708 opts.get('date') or date, match=m,
3704 3709 editor=editor)
3705 3710 else:
3706 3711 if opts.get('exact') or opts.get('import_branch'):
3707 3712 branch = branch or 'default'
3708 3713 else:
3709 3714 branch = p1.branch()
3710 3715 store = patch.filestore()
3711 3716 try:
3712 3717 files = set()
3713 3718 try:
3714 3719 patch.patchrepo(ui, repo, p1, store, tmpname, strip,
3715 3720 files, eolmode=None)
3716 3721 except patch.PatchError, e:
3717 3722 raise util.Abort(str(e))
3718 3723 memctx = patch.makememctx(repo, (p1.node(), p2.node()),
3719 3724 message,
3720 3725 opts.get('user') or user,
3721 3726 opts.get('date') or date,
3722 3727 branch, files, store,
3723 3728 editor=cmdutil.commiteditor)
3724 3729 repo.savecommitmessage(memctx.description())
3725 3730 n = memctx.commit()
3726 3731 finally:
3727 3732 store.close()
3728 3733 if opts.get('exact') and hex(n) != nodeid:
3729 3734 raise util.Abort(_('patch is damaged or loses information'))
3730 3735 if n:
3731 3736 # i18n: refers to a short changeset id
3732 3737 msg = _('created %s') % short(n)
3733 3738 return (msg, n)
3734 3739 finally:
3735 3740 os.unlink(tmpname)
3736 3741
3737 3742 try:
3738 3743 try:
3739 3744 wlock = repo.wlock()
3740 3745 if not opts.get('no_commit'):
3741 3746 lock = repo.lock()
3742 3747 tr = repo.transaction('import')
3743 3748 parents = repo.parents()
3744 3749 for patchurl in patches:
3745 3750 if patchurl == '-':
3746 3751 ui.status(_('applying patch from stdin\n'))
3747 3752 patchfile = ui.fin
3748 3753 patchurl = 'stdin' # for error message
3749 3754 else:
3750 3755 patchurl = os.path.join(base, patchurl)
3751 3756 ui.status(_('applying %s\n') % patchurl)
3752 3757 patchfile = hg.openpath(ui, patchurl)
3753 3758
3754 3759 haspatch = False
3755 3760 for hunk in patch.split(patchfile):
3756 3761 (msg, node) = tryone(ui, hunk, parents)
3757 3762 if msg:
3758 3763 haspatch = True
3759 3764 ui.note(msg + '\n')
3760 3765 if update or opts.get('exact'):
3761 3766 parents = repo.parents()
3762 3767 else:
3763 3768 parents = [repo[node]]
3764 3769
3765 3770 if not haspatch:
3766 3771 raise util.Abort(_('%s: no diffs found') % patchurl)
3767 3772
3768 3773 if tr:
3769 3774 tr.close()
3770 3775 if msgs:
3771 3776 repo.savecommitmessage('\n* * *\n'.join(msgs))
3772 3777 except: # re-raises
3773 3778 # wlock.release() indirectly calls dirstate.write(): since
3774 3779 # we're crashing, we do not want to change the working dir
3775 3780 # parent after all, so make sure it writes nothing
3776 3781 repo.dirstate.invalidate()
3777 3782 raise
3778 3783 finally:
3779 3784 if tr:
3780 3785 tr.release()
3781 3786 release(lock, wlock)
3782 3787
@command('incoming|in',
    [('f', 'force', None,
     _('run even if remote repository is unrelated')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('', 'bundle', '',
     _('file to store the bundles into'), _('FILE')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmarks', False, _("compare bookmarks")),
    ('b', 'branch', [],
     _('a specific branch you would like to pull'), _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.

    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.

    See pull for valid source format details.

    Returns 0 if there are incoming changes, 1 otherwise.
    """
    if opts.get('graph'):
        # graph output takes a dedicated display path
        cmdutil.checkunsupportedgraphflags([], opts)
        def display(other, chlist, displayer):
            revdag = cmdutil.graphrevs(other, chlist, opts)
            showparents = [ctx.node() for ctx in repo[None].parents()]
            cmdutil.displaygraph(ui, revdag, displayer, showparents,
                                 graphmod.asciiedges)

        hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
        return 0

    if opts.get('bundle') and opts.get('subrepos'):
        raise util.Abort(_('cannot combine --bundle and --subrepos'))

    if opts.get('bookmarks'):
        # bookmark comparison replaces the changeset listing entirely
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        other = hg.peer(repo, opts, source)
        if 'bookmarks' not in other.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(source))
        return bookmarks.diff(ui, repo, other)

    repo._subtoppath = ui.expandpath(source)
    try:
        return hg.incoming(ui, repo, source, opts)
    finally:
        del repo._subtoppath
3838 3843
3839 3844
@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'))
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.

    If no directory is given, the current directory is used.

    It is possible to specify an ``ssh://`` URL as the destination.
    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    hg.peer(ui, opts, ui.expandpath(dest), create=True)
3855 3860
@command('locate',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
    ] + walkopts,
    _('[OPTION]... [PATTERN]...'))
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print files under Mercurial control in the working directory whose
    names match the given patterns.

    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".

    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.

    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.

    Returns 0 if a match is found, 1 otherwise.
    """
    end = opts.get('print0') and '\0' or '\n'
    rev = scmutil.revsingle(repo, opts.get('rev'), None).node()

    ret = 1
    m = scmutil.match(repo[rev], pats, opts, default='relglob')
    # suppress match errors: unmatched patterns just yield no output
    m.bad = lambda x, y: False
    for abs in repo[rev].walk(m):
        if not rev and abs not in repo.dirstate:
            continue
        if opts.get('fullpath'):
            ui.write(repo.wjoin(abs), end)
        else:
            ui.write(((pats and m.rel(abs)) or abs), end)
        ret = 0

    return ret
3898 3903
@command('^log|history',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets (DEPRECATED)')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ] + logopts + walkopts,
    _('[OPTION]... [FILE]'))
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    If no revision range is specified, the default is ``tip:0`` unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors or descendants of the starting revision.

    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.

    .. note::
       log -p/--patch may generate unexpected diff output for merge
       changesets, as it will only compare the merge changeset against
       its first parent. Also, only files different from BOTH parents
       will appear in files:.

    .. note::
       for performance reasons, log FILE may omit duplicate changes
       made on branches and will not show deletions. To see all
       changes including duplicates and deletions, use the --removed
       switch.

    .. container:: verbose

      Some examples:

      - changesets with full descriptions and file lists::

          hg log -v

      - changesets ancestral to the working directory::

          hg log -f

      - last 10 commits on the current branch::

          hg log -l 10 -b .

      - changesets showing all modifications of a file, including removals::

          hg log --removed file.c

      - all changesets that touch a directory, with diffs, excluding merges::

          hg log -Mp lib/

      - all revision numbers that match a keyword::

          hg log -k bug --template "{rev}\\n"

      - check if a given changeset is included is a tagged release::

          hg log -r "a21ccf and ancestor(1.9)"

      - find all changesets by some user in a date range::

          hg log -k alice -d "may 2008 to jul 2008"

      - summary of all changesets after the last tag::

          hg log -r "last(tagged())::" --template "{desc|firstline}\\n"

    See :hg:`help dates` for a list of formats valid for -d/--date.

    See :hg:`help revisions` and :hg:`help revsets` for more about
    specifying revisions.

    See :hg:`help templates` for more about pre-packaged styles and
    specifying custom templates.

    Returns 0 on success.
    """
    # -G/--graph delegates to the graphlog implementation entirely
    if opts.get('graph'):
        return cmdutil.graphlog(ui, repo, *pats, **opts)

    matchfn = scmutil.match(repo[None], pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0

    # for -C/--copies, build a rename-lookup function; when an explicit
    # revision range is given, cap the lookup at the highest requested rev
    getrenamed, endrev = None, None
    if opts.get('copies'):
        if opts.get('rev'):
            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    # df is a date-match predicate, or False when no -d/--date was given
    df = False
    if opts.get("date"):
        df = util.matchdate(opts["date"])

    # normalize -b/--branch and the deprecated --only-branch into a single
    # list of resolved branch names (mutates opts for use by prep below)
    branches = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in branches]

    displayer = cmdutil.show_changeset(ui, repo, opts, True)
    def prep(ctx, fns):
        # filter callback invoked by walkchangerevs for each candidate
        # changeset; returning early (None) skips the changeset
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('branch') and ctx.branch() not in opts['branch']:
            return
        if df and not df(ctx.date()[0]):
            return

        lower = encoding.lower
        if opts.get('user'):
            luser = lower(ctx.user())
            # keep the changeset if ANY -u pattern matches (substring,
            # case-insensitive); the for/else skips it when none do
            for k in [lower(x) for x in opts['user']]:
                if (k in luser):
                    break
            else:
                return
        if opts.get('keyword'):
            # -k matches case-insensitively against user, description,
            # and the list of touched files
            luser = lower(ctx.user())
            ldesc = lower(ctx.description())
            lfiles = lower(" ".join(ctx.files()))
            for k in [lower(x) for x in opts['keyword']]:
                if (k in luser or k in ldesc or k in lfiles):
                    break
            else:
                return

        # resolve copies for display; rev 0 (falsy) is intentionally
        # skipped along with None here
        copies = None
        if getrenamed is not None and rev:
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))

        # choose the file matcher used for -p/--patch and --stat output
        revmatchfn = None
        if opts.get('patch') or opts.get('stat'):
            if opts.get('follow') or opts.get('follow_first'):
                # note: this might be wrong when following through merges
                revmatchfn = scmutil.match(repo[None], fns, default='path')
            else:
                revmatchfn = matchfn

        displayer.show(ctx, copies=copies, matchfn=revmatchfn)

    # walkchangerevs drives iteration; flush() reports whether the
    # changeset was actually displayed, which is what -l/--limit counts
    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        if displayer.flush(ctx.rev()):
            count += 1
        if count == limit:
            break
    displayer.close()
4079 4084
@command('manifest',
    [('r', 'rev', '', _('revision to display'), _('REV')),
     ('', 'all', False, _("list files from all revisions"))],
    _('[-r REV]'))
def manifest(ui, repo, node=None, rev=None, **opts):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.

    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.

    If option --all is specified, the list of all files from all revisions
    is printed. This includes deleted and renamed files.

    Returns 0 on success.
    """

    fm = ui.formatter('manifest', opts)

    if opts.get('all'):
        if rev or node:
            raise util.Abort(_("can't specify a revision with --all"))

        # scan the store for filelog entries; they are named
        # "data/<path>.i", so strip that wrapper to recover <path>
        prefix = "data/"
        suffix = ".i"
        plen = len(prefix)
        slen = len(suffix)
        names = []
        lock = repo.lock()
        try:
            for fn, b, size in repo.store.datafiles():
                if (size != 0
                    and fn[:plen] == prefix
                    and fn[-slen:] == suffix):
                    names.append(fn[plen:-slen])
        finally:
            lock.release()
        for name in names:
            fm.startitem()
            fm.write("path", '%s\n', name)
        fm.end()
        return

    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    # allow -r REV as an alternative to a positional node argument
    if not node:
        node = rev

    # per-flag decorations: type character (-v) and octal mode (-v)
    char = {'l': '@', 'x': '*', '': ''}
    mode = {'l': '644', 'x': '755', '': '644'}
    ctx = scmutil.revsingle(repo, node)
    mf = ctx.manifest()
    for f in ctx:
        fm.startitem()
        fl = ctx[f].flags()
        # hash only with --debug; mode/type only with -v
        fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
        fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl])
        fm.write('path', '%s\n', f)
    fm.end()
4141 4146
@command('^merge',
    [('f', 'force', None, _('force a merge with outstanding changes')),
    ('r', 'rev', '', _('revision to merge'), _('REV')),
    ('P', 'preview', None,
     _('review revisions to merge (no merge is performed)'))
    ] + mergetoolopts,
    _('[-P] [-f] [[-r] REV]'))
def merge(ui, repo, node=None, **opts):
    """merge working directory with another revision

    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.

    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.

    ``--tool`` can be used to specify the merge tool used for file
    merges. It overrides the HGMERGE environment variable and your
    configuration files. See :hg:`help merge-tools` for options.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.

    :hg:`resolve` must be used to resolve unresolved files.

    To undo an uncommitted merge, use :hg:`update --clean .` which
    will check out a clean copy of the original merge parent, losing
    all changes.

    Returns 0 on success, 1 if there are unresolved files.
    """

    # accept the merge target either positionally or via -r, not both
    if opts.get('rev') and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')

    if node:
        node = scmutil.revsingle(repo, node).node()

    # no explicit target and an active bookmark: merge with the other
    # head carrying the same bookmark, if there is exactly one
    if not node and repo._bookmarkcurrent:
        bmheads = repo.bookmarkheads(repo._bookmarkcurrent)
        curhead = repo[repo._bookmarkcurrent].node()
        if len(bmheads) == 2:
            # pick whichever bookmark head is not the current one
            if curhead == bmheads[0]:
                node = bmheads[1]
            else:
                node = bmheads[0]
        elif len(bmheads) > 2:
            raise util.Abort(_("multiple matching bookmarks to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))
        elif len(bmheads) <= 1:
            raise util.Abort(_("no matching bookmark to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))

    # no explicit target, no active bookmark: infer the other head of
    # the current named branch, ignoring heads that carry bookmarks
    if not node and not repo._bookmarkcurrent:
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        # nbhs: branch heads with no bookmarks attached
        nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]

        if len(nbhs) > 2:
            # NOTE(review): the count reported here is len(bheads) (all
            # branch heads) while the condition tests the bookmark-less
            # subset — the numbers can disagree; confirm intended.
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev")
                             % (branch, len(bheads)),
                             hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(nbhs) <= 1:
            # fewer than two candidate heads: explain why no merge target
            # could be inferred, with the most specific message available
            if len(bheads) > 1:
                raise util.Abort(_("heads are bookmarked - "
                                   "please merge with an explicit rev"),
                                 hint=_("run 'hg heads' to see all heads"))
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev")
                                 % branch,
                                 hint=_("run 'hg heads' to see all heads"))
            msg, hint = _('nothing to merge'), None
            if parent != repo.lookup(branch):
                hint = _("use 'hg update' instead")
            raise util.Abort(msg, hint=hint)

        if parent not in bheads:
            raise util.Abort(_('working directory not at a head revision'),
                             hint=_("use 'hg update' or merge with an "
                                    "explicit revision"))
        # merge with the candidate head that is not the working parent
        if parent == nbhs[0]:
            node = nbhs[-1]
        else:
            node = nbhs[0]

    if opts.get('preview'):
        # find nodes that are ancestors of p2 but not of p1
        p1 = repo.lookup('.')
        p2 = repo.lookup(node)
        nodes = repo.changelog.findmissing(common=[p1], heads=[p2])

        displayer = cmdutil.show_changeset(ui, repo, opts)
        for node in nodes:
            displayer.show(repo[node])
        displayer.close()
        return 0

    try:
        # ui.forcemerge is an internal variable, do not document
        repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
        return hg.merge(repo, node, force=opts.get('force'))
    finally:
        # always restore the override, even if the merge aborts
        ui.setconfig('ui', 'forcemerge', '')
4257 4262
@command('outgoing|out',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'), _('REV')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('B', 'bookmarks', False, _('compare bookmarks')),
    ('b', 'branch', [], _('a specific branch you would like to push'),
     _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination

    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.

    See pull for details of valid destination formats.

    Returns 0 if there are outgoing changes, 1 otherwise.
    """
    # graph mode: compute the outgoing set and render it as an ASCII DAG
    if opts.get('graph'):
        cmdutil.checkunsupportedgraphflags([], opts)
        missing = hg._outgoing(ui, repo, dest, opts)
        if missing is None:
            return

        dag = cmdutil.graphrevs(repo, missing, opts)
        displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
        wparents = [ctx.node() for ctx in repo[None].parents()]
        cmdutil.displaygraph(ui, dag, displayer, wparents,
                             graphmod.asciiedges)
        return 0

    # -B: compare bookmarks with the remote instead of changesets
    if opts.get('bookmarks'):
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.peer(repo, opts, dest)
        if 'bookmarks' not in other.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(dest))
        return bookmarks.diff(ui, other, repo)

    # default: delegate to hg.outgoing; expose the destination to
    # subrepos for the duration of the call
    repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
    try:
        return hg.outgoing(ui, repo, dest, opts)
    finally:
        del repo._subtoppath
4307 4312
@command('parents',
    [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
    ] + templateopts,
    _('[-r REV] [FILE]'))
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision

    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.

    Returns 0 on success.
    """

    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    if file_:
        # a file argument must name exactly one explicit file
        m = scmutil.match(ctx, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit filename'))
        file_ = m.files()[0]
        # collect the file's node in each (non-null) parent, skipping
        # parents where the file does not exist
        filenodes = []
        for pctx in ctx.parents():
            if not pctx:
                continue
            try:
                filenodes.append(pctx.filenode(file_))
            except error.LookupError:
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        # map each file node back to the changeset that introduced it
        fl = repo.file(file_)
        pnodes = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
    else:
        pnodes = [pctx.node() for pctx in ctx.parents()]

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in pnodes:
        # suppress the null revision (e.g. a root changeset's parent)
        if n != nullid:
            displayer.show(repo[n])
    displayer.close()
4351 4356
@command('paths', [], _('[NAME]'))
def paths(ui, repo, search=None):
    """show aliases for remote repositories

    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.

    Option -q/--quiet suppresses all output when searching for NAME
    and shows only the path names when listing all definitions.

    Path names are defined in the [paths] section of your
    configuration file and in ``/etc/mercurial/hgrc``. If run inside a
    repository, ``.hg/hgrc`` is used, too.

    The path names ``default`` and ``default-push`` have a special
    meaning. When performing a push or pull operation, they are used
    as fallbacks if no location is specified on the command-line.
    When ``default-push`` is set, it will be used for push and
    ``default`` will be used for pull; otherwise ``default`` is used
    as the fallback for both. When cloning a repository, the clone
    source is written as ``default`` in ``.hg/hgrc``. Note that
    ``default`` and ``default-push`` apply to all inbound (e.g.
    :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and
    :hg:`bundle`) operations.

    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    pathitems = ui.configitems("paths")

    if search:
        # look up a single alias by exact name; passwords in the URL
        # are masked before display
        for name, path in pathitems:
            if name == search:
                ui.status("%s\n" % util.hidepassword(path))
                return
        if not ui.quiet:
            ui.warn(_("not found!\n"))
        return 1

    # no NAME given: list every configured alias
    for name, path in pathitems:
        if ui.quiet:
            ui.write("%s\n" % name)
        else:
            ui.write("%s = %s\n" % (name, util.hidepassword(path)))
4395 4400
@command('phase',
    [('p', 'public', False, _('set changeset phase to public')),
     ('d', 'draft', False, _('set changeset phase to draft')),
     ('s', 'secret', False, _('set changeset phase to secret')),
     ('f', 'force', False, _('allow to move boundary backward')),
     ('r', 'rev', [], _('target revision'), _('REV')),
    ],
    _('[-p|-d|-s] [-f] [-r] REV...'))
def phase(ui, repo, *revs, **opts):
    """set or show the current phase name

    With no argument, show the phase name of specified revisions.

    With one of -p/--public, -d/--draft or -s/--secret, change the
    phase value of the specified revisions.

    Unless -f/--force is specified, :hg:`phase` won't move changeset from a
    lower phase to an higher phase. Phases are ordered as follows::

        public < draft < secret

    Return 0 on success, 1 if no phases were changed or some could not
    be changed.
    """
    # search for a unique phase argument
    # (the option names match phases.phasenames, so the index of the
    # selected option is the numeric phase value)
    targetphase = None
    for idx, name in enumerate(phases.phasenames):
        if opts[name]:
            if targetphase is not None:
                raise util.Abort(_('only one phase can be specified'))
            targetphase = idx

    # look for specified revision
    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        raise util.Abort(_('no revisions specified'))

    revs = scmutil.revrange(repo, revs)

    lock = None
    ret = 0
    if targetphase is None:
        # display
        for r in revs:
            ctx = repo[r]
            ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
    else:
        lock = repo.lock()
        try:
            # set phase
            if not revs:
                raise util.Abort(_('empty revision set'))
            nodes = [repo[r].node() for r in revs]
            # snapshot (copy) the phase of every revision before moving the
            # boundary, so changes can be counted afterwards
            olddata = repo._phasecache.getphaserevs(repo)[:]
            phases.advanceboundary(repo, targetphase, nodes)
            if opts['force']:
                # --force also allows moving to a *higher* (more private)
                # phase, which advanceboundary alone will not do
                phases.retractboundary(repo, targetphase, nodes)
        finally:
            lock.release()
        # moving revision from public to draft may hide them
        # We have to check result on an unfiltered repository
        unfi = repo.unfiltered()
        newdata = repo._phasecache.getphaserevs(unfi)
        changes = sum(o != newdata[i] for i, o in enumerate(olddata))
        cl = unfi.changelog
        # revisions the boundary move could not bring to the target phase
        # (they stayed in a lower phase and --force was not given)
        rejected = [n for n in nodes
                    if newdata[cl.rev(n)] < targetphase]
        if rejected:
            ui.warn(_('cannot move %i changesets to a more permissive '
                      'phase, use --force\n') % len(rejected))
            ret = 1
        if changes:
            msg = _('phase changed for %i changesets\n') % changes
            if ret:
                # partial success: make the message visible despite the error
                ui.status(msg)
            else:
                ui.note(msg)
        else:
            ui.warn(_('no phases changed\n'))
            ret = 1
    return ret
4478 4483
4479 4484 def postincoming(ui, repo, modheads, optupdate, checkout):
4480 4485 if modheads == 0:
4481 4486 return
4482 4487 if optupdate:
4483 4488 movemarkfrom = repo['.'].node()
4484 4489 try:
4485 4490 ret = hg.update(repo, checkout)
4486 4491 except util.Abort, inst:
4487 4492 ui.warn(_("not updating: %s\n") % str(inst))
4488 4493 return 0
4489 4494 if not ret and not checkout:
4490 4495 if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
4491 4496 ui.status(_("updating bookmark %s\n") % repo._bookmarkcurrent)
4492 4497 return ret
4493 4498 if modheads > 1:
4494 4499 currentbranchheads = len(repo.branchheads())
4495 4500 if currentbranchheads == modheads:
4496 4501 ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
4497 4502 elif currentbranchheads > 1:
4498 4503 ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to "
4499 4504 "merge)\n"))
4500 4505 else:
4501 4506 ui.status(_("(run 'hg heads' to see heads)\n"))
4502 4507 else:
4503 4508 ui.status(_("(run 'hg update' to get a working copy)\n"))
4504 4509
@command('^pull',
    [('u', 'update', None,
     _('update to new branch head if changesets were pulled')),
    ('f', 'force', None, _('run even when remote repository is unrelated')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
    ('b', 'branch', [], _('a specific branch you would like to pull'),
     _('BRANCH')),
    ] + remoteopts,
    _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to a local repository (the current one unless
    -R is specified). By default, this does not update the copy of the
    project in the working directory.

    Use :hg:`incoming` if you want to see what would have been added
    by a pull at the time you issued this command. If you then decide
    to add those changes to the repository, you should use :hg:`pull
    -r X` where ``X`` is the last changeset listed by :hg:`incoming`.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    Returns 0 on success, 1 if an update had unresolved files.
    """
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.peer(repo, opts, source)
    ui.status(_('pulling from %s\n') % util.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))

    # snapshot the remote's bookmarks up front; -B resolution and the
    # post-pull bookmark import both use this single listing
    remotebookmarks = other.listkeys('bookmarks')

    if opts.get('bookmark'):
        # -B BOOKMARK: pull the changeset the remote bookmark points to
        if not revs:
            revs = []
        for b in opts['bookmark']:
            if b not in remotebookmarks:
                raise util.Abort(_('remote bookmark %s not found!') % b)
            revs.append(remotebookmarks[b])

    if revs:
        try:
            # resolve symbolic revisions on the remote side
            revs = [other.lookup(rev) for rev in revs]
        except error.CapabilityError:
            err = _("other repository doesn't support revision lookup, "
                    "so a rev cannot be specified.")
            raise util.Abort(err)

    modheads = repo.pull(other, heads=revs, force=opts.get('force'))
    # merge remote bookmark state into the local repository
    bookmarks.updatefromremote(ui, repo, remotebookmarks, source)
    if checkout:
        checkout = str(repo.changelog.rev(other.lookup(checkout)))
    # expose the source to subrepos for the duration of postincoming
    repo._subtoppath = source
    try:
        ret = postincoming(ui, repo, modheads, opts.get('update'), checkout)

    finally:
        del repo._subtoppath

    # update specified bookmarks
    if opts.get('bookmark'):
        marks = repo._bookmarks
        for b in opts['bookmark']:
            # explicit pull overrides local bookmark if any
            ui.status(_("importing bookmark %s\n") % b)
            marks[b] = repo[remotebookmarks[b]].node()
        marks.write()

    return ret
4579 4584
@command('^push',
    [('f', 'force', None, _('force push')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'),
     _('REV')),
    ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
    ('b', 'branch', [],
     _('a specific branch you would like to push'), _('BRANCH')),
    ('', 'new-branch', False, _('allow pushing a new branch')),
    ] + remoteopts,
    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changesets from the local repository to the specified
    destination.

    This operation is symmetrical to pull: it is identical to a pull
    in the destination repository from the current one.

    By default, push will not allow creation of new heads at the
    destination, since multiple heads would make it unclear which head
    to use. In this situation, it is recommended to pull and merge
    before pushing.

    Use --new-branch if you want to allow push to create a new named
    branch that is not present at the destination. This allows you to
    only create a new branch without forcing other changes.

    Use -f/--force to override the default behavior and push all
    changesets on all branches.

    If -r/--rev is used, the specified revision and all its ancestors
    will be pushed to the remote repository.

    If -B/--bookmark is used, the specified bookmarked revision, its
    ancestors, and the bookmark will be pushed to the remote
    repository.

    Please see :hg:`help urls` for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.

    Returns 0 if push was successful, 1 if nothing to push.
    """

    if opts.get('bookmark'):
        for b in opts['bookmark']:
            # translate -B options to -r so changesets get pushed
            if b in repo._bookmarks:
                opts.setdefault('rev', []).append(b)
            else:
                # if we try to push a deleted bookmark, translate it to null
                # this lets simultaneous -r, -b options continue working
                opts.setdefault('rev', []).append("null")

    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    ui.status(_('pushing to %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.peer(repo, opts, dest)
    if revs:
        revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]

    # expose the destination to subrepos while they push
    repo._subtoppath = dest
    try:
        # push subrepos depth-first for coherent ordering
        c = repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            # a subrepo push returning 0 means "nothing done"; abort the
            # whole push in that case
            if c.sub(s).push(opts) == 0:
                return False
    finally:
        del repo._subtoppath
    result = repo.push(other, opts.get('force'), revs=revs,
                       newbranch=opts.get('new_branch'))

    # repo.push returns non-zero on success; invert for the shell
    result = not result

    if opts.get('bookmark'):
        rb = other.listkeys('bookmarks')
        for b in opts['bookmark']:
            # explicit push overrides remote bookmark if any
            if b in repo._bookmarks:
                ui.status(_("exporting bookmark %s\n") % b)
                new = repo[b].hex()
            elif b in rb:
                ui.status(_("deleting remote bookmark %s\n") % b)
                new = '' # delete
            else:
                ui.warn(_('bookmark %s does not exist on the local '
                          'or remote repository!\n') % b)
                return 2
            old = rb.get(b, '')
            # pushkey compares-and-swaps the remote bookmark from old to new
            r = other.pushkey('bookmarks', b, old, new)
            if not r:
                ui.warn(_('updating bookmark %s failed!\n') % b)
                if not result:
                    result = 2

    return result
4680 4685
@command('recover', [])
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.

    Returns 0 if successful, 1 if nothing to recover or verify fails.
    """
    # nothing to recover: report failure without running verify
    if not repo.recover():
        return 1
    # a transaction was rolled back; verify the repository and use the
    # verification result as our exit status
    return hg.verify(repo)
4696 4701
@command('^remove|rm',
    [('A', 'after', None, _('record delete for missing files')),
    ('f', 'force', None,
     _('remove (and delete) file even if added or modified')),
    ] + walkopts,
    _('[OPTION]... FILE...'))
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the current branch.

    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see :hg:`revert`. To undo added
    files, see :hg:`forget`.

    .. container:: verbose

      -A/--after can be used to remove only files that have already
      been deleted, -f/--force can be used to force deletion, and -Af
      can be used to remove files from the next revision without
      deleting them from the working directory.

      The following table details the behavior of remove for different
      file states (columns) and option combinations (rows). The file
      states are Added [A], Clean [C], Modified [M] and Missing [!]
      (as reported by :hg:`status`). The actions are Warn, Remove
      (from branch) and Delete (from disk):

      ======= == == == ==
               A  C  M  !
      ======= == == == ==
      none     W  RD W  R
      -f       R  RD RD R
      -A       W  W  W  R
      -Af      R  R  R  R
      ======= == == == ==

      Note that remove never deletes files in Added [A] state from the
      working directory, not even if option --force is specified.

    Returns 0 on success, 1 if any warnings encountered.
    """

    # exit status: becomes 1 as soon as any warning is issued
    ret = 0
    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))

    m = scmutil.match(repo[None], pats, opts)
    # status tuple order is (modified, added, removed, deleted, unknown,
    # ignored, clean); only the four states the table above cares about
    # are unpacked here
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    # warn about failure to delete explicit files/dirs
    wctx = repo[None]
    for f in m.files():
        # tracked names (or directories containing tracked files) are
        # handled by the state table below, not warned about here
        if f in repo.dirstate or f in wctx.dirs():
            continue
        if os.path.exists(m.rel(f)):
            if os.path.isdir(m.rel(f)):
                ui.warn(_('not removing %s: no tracked files\n') % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    # NOTE(review): the local name 'list' shadows the builtin for the rest
    # of this function
    if force:
        # -f: remove everything, whatever its state
        list = modified + deleted + clean + added
    elif after:
        # -A: only record deletes for files already missing from disk
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        # default: remove missing and clean files; warn for the rest
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        # drop the selected files from dirstate tracking
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret
4796 4801
@command('rename|move|mv',
    [('A', 'after', None, _('record a rename that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... SOURCE... DEST'))
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect at the next commit. To undo a rename
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # rename is copy + remove, so delegate to the shared copy
    # implementation in rename mode while holding the wdir lock
    lock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        lock.release()
4823 4828
@command('resolve',
    [('a', 'all', None, _('select all unresolved files')),
    ('l', 'list', None, _('list state of files needing merge')),
    ('m', 'mark', None, _('mark files as resolved')),
    ('u', 'unmark', None, _('mark files as unresolved')),
    ('n', 'no-status', None, _('hide status prefix'))]
    + mergetoolopts + walkopts,
    _('[OPTION]... [FILE]...'))
def resolve(ui, repo, *pats, **opts):
    """redo merges or set/view the merge status of files

    Merges with unresolved conflicts are often the result of
    non-interactive merging using the ``internal:merge`` configuration
    setting, or a command-line merge tool like ``diff3``. The resolve
    command is used to manage the files involved in a merge, after
    :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
    working directory must have two parents). See :hg:`help
    merge-tools` for information on configuring merge tools.

    The resolve command can be used in the following ways:

    - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
      files, discarding any previous merge attempts. Re-merging is not
      performed for files already marked as resolved. Use ``--all/-a``
      to select all unresolved files. ``--tool`` can be used to specify
      the merge tool used for the given files. It overrides the HGMERGE
      environment variable and your configuration files. Previous file
      contents are saved with a ``.orig`` suffix.

    - :hg:`resolve -m [FILE]`: mark a file as having been resolved
      (e.g. after having manually fixed-up the files). The default is
      to mark all unresolved files.

    - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
      default is to mark all resolved files.

    - :hg:`resolve -l`: list files which had or still have conflicts.
      In the printed list, ``U`` = unresolved and ``R`` = resolved.

    Note that Mercurial will not let you commit files with unresolved
    merge conflicts. You must use :hg:`resolve -m ...` before you can
    commit after a conflicting merge.

    Returns 0 on success, 1 if any files fail a resolve attempt.
    """

    # NOTE(review): 'all' shadows the builtin for the rest of this function
    all, mark, unmark, show, nostatus = \
        [opts.get(o) for o in 'all mark unmark list no_status'.split()]

    # --list, --mark and --unmark are mutually exclusive modes
    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))

    ms = mergemod.mergestate(repo)
    m = scmutil.match(repo[None], pats, opts)
    ret = 0

    # walk every file recorded in the merge state and dispatch on mode
    for f in ms:
        if m(f):
            if show:
                # --list: print state, optionally prefixed with U/R
                if nostatus:
                    ui.write("%s\n" % f)
                else:
                    ui.write("%s %s\n" % (ms[f].upper(), f),
                             label='resolve.' +
                             {'u': 'unresolved', 'r': 'resolved'}[ms[f]])
            elif mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                # default mode: actually re-run the merge for this file
                wctx = repo[None]
                mctx = wctx.parents()[-1]

                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                util.copyfile(a, a + ".resolve")

                try:
                    # resolve file; --tool temporarily overrides
                    # ui.forcemerge and is always restored afterwards
                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                    if ms.resolve(f, wctx, mctx):
                        ret = 1
                finally:
                    ui.setconfig('ui', 'forcemerge', '')
                # persist per-file progress so an interrupted run keeps
                # the state of files already processed
                ms.commit()

                # replace filemerge's .orig file with our resolve file
                util.rename(a + ".resolve", a + ".orig")

    # persist any mark/unmark changes made above
    ms.commit()
    return ret
4920 4925
@command('revert',
    [('a', 'all', None, _('revert all changes when no arguments given')),
    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
    ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
    ('C', 'no-backup', None, _('do not save backup copies of files')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [-r REV] [NAME]...'))
def revert(ui, repo, *pats, **opts):
    """restore files to their checkout state

    .. note::

       To check out earlier revisions, you should use :hg:`update REV`.
       To cancel an uncommitted merge (and lose your changes), use
       :hg:`update --clean .`.

    With no revision specified, revert the specified files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify a
    revision.

    Using the -r/--rev or -d/--date options, revert the given files or
    directories to their states as of a specific revision. Because
    revert does not change the working directory parents, this will
    cause these files to appear modified. This can be helpful to "back
    out" some or all of an earlier change. See :hg:`backout` for a
    related method.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    """

    # --date is translated into an equivalent --rev; giving both is an error
    if opts.get("date"):
        if opts.get("rev"):
            raise util.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])

    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        # revert after merge is a trap for new users (issue2915)
        raise util.Abort(_('uncommitted merge with no revision specified'),
                         hint=_('use "hg update" or see "hg help revert"'))

    ctx = scmutil.revsingle(repo, opts.get('rev'))

    # with neither file patterns nor --all there is nothing to do; build
    # the most helpful hint for the abort message before raising
    if not pats and not opts.get('all'):
        msg = _("no files or directories specified")
        if p2 != nullid:
            hint = _("uncommitted merge, use --all to discard all changes,"
                     " or 'hg update -C .' to abort the merge")
            raise util.Abort(msg, hint=hint)
        dirty = util.any(repo.status())
        node = ctx.node()
        if node != parent:
            # reverting to a revision other than the wdir parent: suggest
            # 'hg update' as the likely intended command
            if dirty:
                hint = _("uncommitted changes, use --all to discard all"
                         " changes, or 'hg update %s' to update") % ctx.rev()
            else:
                hint = _("use --all to revert all files,"
                         " or 'hg update %s' to update") % ctx.rev()
        elif dirty:
            hint = _("uncommitted changes, use --all to discard all changes")
        else:
            hint = _("use --all to revert all files")
        raise util.Abort(msg, hint=hint)

    # the actual per-file revert logic lives in cmdutil.revert
    return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, **opts)
4994 4999
@command('rollback', dryrunopts +
         [('f', 'force', False, _('ignore safety measures'))])
def rollback(ui, repo, **opts):
    """roll back the last transaction (dangerous)

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository.

    .. container:: verbose

      For example, the following commands are transactional, and their
      effects can be rolled back:

      - commit
      - import
      - pull
      - push (with this repository as the destination)
      - unbundle

      To avoid permanent data loss, rollback will refuse to rollback a
      commit transaction if it isn't checked out. Use --force to
      override this protection.

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.

    Returns 0 on success, 1 if no rollback data is available.
    """
    # forward the command-line flags to the repository layer, which
    # implements the actual (dry-run aware) rollback
    dryrun = opts.get('dry_run')
    force = opts.get('force')
    return repo.rollback(dryrun=dryrun, force=force)
5036 5041
@command('root', [])
def root(ui, repo):
    """print the root (top) of the current working directory

    Print the root directory of the current repository.

    Returns 0 on success.
    """
    # emit the repository root path followed by a newline
    ui.write("%s\n" % repo.root)
5046 5051
@command('^serve',
    [('A', 'accesslog', '', _('name of access log file to write to'),
     _('FILE')),
    ('d', 'daemon', None, _('run server in background')),
    ('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('NUM')),
    ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
    # use string type, then we can check if something was passed
    ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
    ('a', 'address', '', _('address to listen on (default: all interfaces)'),
     _('ADDR')),
    ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
     _('PREFIX')),
    ('n', 'name', '',
     _('name to show in web pages (default: working directory)'), _('NAME')),
    ('', 'web-conf', '',
     _('name of the hgweb config file (see "hg help hgweb")'), _('FILE')),
    ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
     _('FILE')),
    ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
    ('', 'stdio', None, _('for remote clients')),
    ('', 'cmdserver', '', _('for remote clients'), _('MODE')),
    ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
    ('', 'style', '', _('template style to use'), _('STYLE')),
    ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
    ('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
    _('[OPTION]...'))
def serve(ui, repo, **opts):
    """start stand-alone webserver

    Start a local HTTP repository browser and pull server. You can use
    this for ad-hoc sharing and browsing of repositories. It is
    recommended to use a real web server to serve a repository for
    longer periods of time.

    Please note that the server does not implement access control.
    This means that, by default, anybody can read from the server and
    nobody can write to it by default. Set the ``web.allow_push``
    option to ``*`` to allow everybody to push to the server. You
    should use a real web server if you need to authenticate users.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.

    To have the server choose a free port number to listen on, specify
    a port number of 0; in this case, the server will print the port
    number it uses.

    Returns 0 on success.
    """

    # --stdio and --cmdserver select alternate wire protocols and are
    # mutually exclusive
    if opts["stdio"] and opts["cmdserver"]:
        raise util.Abort(_("cannot use --stdio with --cmdserver"))

    def checkrepo():
        # the ssh and command servers serve a single repository, so one
        # must exist here
        if repo is None:
            raise error.RepoError(_("there is no Mercurial repository here"
                                    " (.hg not found)"))

    if opts["stdio"]:
        # ssh protocol server over stdin/stdout; never returns normally
        checkrepo()
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()

    if opts["cmdserver"]:
        # command server for local API clients
        checkrepo()
        s = commandserver.server(ui, repo, opts["cmdserver"])
        return s.serve()

    # this way we can check if something was given in the command-line
    if opts.get('port'):
        opts['port'] = util.getport(opts.get('port'))

    # copy the relevant command-line options into the [web] config
    # section of both the base ui and the repo ui (when they differ)
    baseui = repo and repo.baseui or ui
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog certificate encoding")
    for o in optlist.split():
        val = opts.get(o, '')
        if val in (None, ''): # should check against default options instead
            continue
        baseui.setconfig("web", o, val)
        if repo and repo.ui != baseui:
            repo.ui.setconfig("web", o, val)

    # hgweb is built either from a config file (--web-conf) or from the
    # repository itself
    o = opts.get('web_conf') or opts.get('webdir_conf')
    if not o:
        if not repo:
            raise error.RepoError(_("there is no Mercurial repository"
                                    " here (.hg not found)"))
        o = repo

    app = hgweb.hgweb(o, baseui=baseui)

    class service(object):
        def init(self):
            # runs before daemonization: bind the server and announce
            # the listening address
            util.setsignalhandler()
            self.httpd = hgweb.server.create_server(ui, app)

            # with an explicit --port and no -v, stay quiet
            if opts['port'] and not ui.verbose:
                return

            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''

            # omit the port suffix for the HTTP default port
            port = ':%d' % self.httpd.port
            if port == ':80':
                port = ''

            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr

            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            if opts['port']:
                write = ui.status
            else:
                # port 0 was requested: always print the chosen port
                write = ui.write
            write(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                  (fqaddr, port, prefix, bindaddr, self.httpd.port))

        def run(self):
            self.httpd.serve_forever()

    # NOTE(review): rebinding the class name to its single instance
    service = service()

    # cmdutil.service handles --daemon / --pid-file plumbing
    cmdutil.service(opts, initfn=service.init, runfn=service.run)
5179 5184
@command('showconfig|debugconfig',
    [('u', 'untrusted', None, _('show untrusted configuration options'))],
    _('[-u] [NAME]...'))
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no arguments, print names and values of all config items.

    With one argument of the form section.name, print just the value
    of that config item.

    With multiple arguments, print names and values of all config
    items with matching section names.

    With --debug, the source (filename and line number) is printed
    for each config item.

    Returns 0 on success.
    """

    for rcfile in scmutil.rcpath():
        ui.debug('read config from: %s\n' % rcfile)
    untrusted = bool(opts.get('untrusted'))
    if values:
        # dotted arguments select single items, bare ones whole sections;
        # at most one item is allowed, and items can't be mixed with sections
        items = [v for v in values if '.' in v]
        sections = [v for v in values if '.' not in v]
        if len(items) > 1 or items and sections:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        value = str(value).replace('\n', '\\n')
        sectname = section + '.' + name
        if not values:
            # no filter: print everything as section.name=value
            ui.debug('%s: ' %
                     ui.configsource(section, name, untrusted))
            ui.write('%s=%s\n' % (sectname, value))
            continue
        for v in values:
            if v == section:
                # section match: full section.name=value line
                ui.debug('%s: ' %
                         ui.configsource(section, name, untrusted))
                ui.write('%s=%s\n' % (sectname, value))
            elif v == sectname:
                # exact item match: value only
                ui.debug('%s: ' %
                         ui.configsource(section, name, untrusted))
                ui.write(value, '\n')
5225 5230
@command('^status|st',
    [('A', 'all', None, _('show status of all files')),
    ('m', 'modified', None, _('show only modified files')),
    ('a', 'added', None, _('show only added files')),
    ('r', 'removed', None, _('show only removed files')),
    ('d', 'deleted', None, _('show only deleted (but tracked) files')),
    ('c', 'clean', None, _('show only files without changes')),
    ('u', 'unknown', None, _('show only unknown (not tracked) files')),
    ('i', 'ignored', None, _('show only ignored files')),
    ('n', 'no-status', None, _('hide status prefix')),
    ('C', 'copies', None, _('show source of copied files')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('', 'rev', [], _('show difference from revision'), _('REV')),
    ('', 'change', '', _('list the changed files of a revision'), _('REV')),
    ] + walkopts + subrepoopts,
    _('[OPTION]... [FILE]...'))
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    the source of a copy/move operation, are not listed unless
    -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
    Unless options described with "show only ..." are given, the
    options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.

    .. note::
       status may appear to disagree with diff if permissions have
       changed or a merge has occurred. The standard diff format does
       not report permission changes and diff only reports changes
       relative to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the differences between them are
    shown. The --change option can also be used as a shortcut to list
    the changed files of a revision from its first parent.

    The codes used to show the status of files are::

      M = modified
      A = added
      R = removed
      C = clean
      ! = missing (deleted by non-hg command, but still tracked)
      ? = not tracked
      I = ignored
        = origin of the previous file listed as A (added)

    .. container:: verbose

      Examples:

      - show changes in the working directory relative to a
        changeset::

          hg status --rev 9353

      - show all changes including copies in an existing changeset::

          hg status --copies --change 9353

      - get a NUL separated list of added files, suitable for xargs::

          hg status -an0

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # resolve the comparison endpoints: --change compares a revision to
    # its first parent, --rev takes one or two explicit revisions
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        node2 = scmutil.revsingle(repo, change, None).node()
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    cwd = (pats and repo.getcwd()) or ''
    # --print0 terminates entries with NUL instead of newline
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    states = 'modified added removed deleted unknown ignored clean'.split()
    # 'show' is the subset of states actually printed; with no "show
    # only" option the -mardu (or -mard under -q) defaults apply
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        show = ui.quiet and states[:4] or states[:5]

    # ignored/clean/unknown are only computed when they will be shown
    stat = repo.status(node1, node2, scmutil.match(repo[node2], pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show,
                       opts.get('subrepos'))
    # pair each state with its one-character status code and file list
    changestates = zip(states, 'MAR!?IC', stat)

    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        copy = copies.pathcopies(repo[node1], repo[node2])

    fm = ui.formatter('status', opts)
    fmt = '%s' + end
    showchar = not opts.get('no_status')

    for state, char, files in changestates:
        if state in show:
            label = 'status.' + state
            for f in files:
                fm.startitem()
                fm.condwrite(showchar, 'status', '%s ', char, label=label)
                fm.write('path', fmt, repo.pathto(f, cwd), label=label)
                # print the copy source on its own (indented) line
                if f in copy:
                    fm.write("copy", '  %s' + end, repo.pathto(copy[f], cwd),
                             label='status.copied')
    fm.end()
5342 5347
5343 5348 @command('^summary|sum',
5344 5349 [('', 'remote', None, _('check for push and pull'))], '[--remote]')
5345 5350 def summary(ui, repo, **opts):
5346 5351 """summarize working directory state
5347 5352
5348 5353 This generates a brief summary of the working directory state,
5349 5354 including parents, branch, commit status, and available updates.
5350 5355
5351 5356 With the --remote option, this will check the default paths for
5352 5357 incoming and outgoing changes. This can be time-consuming.
5353 5358
5354 5359 Returns 0 on success.
5355 5360 """
5356 5361
5357 5362 ctx = repo[None]
5358 5363 parents = ctx.parents()
5359 5364 pnode = parents[0].node()
5360 5365 marks = []
5361 5366
5362 5367 for p in parents:
5363 5368 # label with log.changeset (instead of log.parent) since this
5364 5369 # shows a working directory parent *changeset*:
5365 5370 # i18n: column positioning for "hg summary"
5366 5371 ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
5367 5372 label='log.changeset changeset.%s' % p.phasestr())
5368 5373 ui.write(' '.join(p.tags()), label='log.tag')
5369 5374 if p.bookmarks():
5370 5375 marks.extend(p.bookmarks())
5371 5376 if p.rev() == -1:
5372 5377 if not len(repo):
5373 5378 ui.write(_(' (empty repository)'))
5374 5379 else:
5375 5380 ui.write(_(' (no revision checked out)'))
5376 5381 ui.write('\n')
5377 5382 if p.description():
5378 5383 ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
5379 5384 label='log.summary')
5380 5385
5381 5386 branch = ctx.branch()
5382 5387 bheads = repo.branchheads(branch)
5383 5388 # i18n: column positioning for "hg summary"
5384 5389 m = _('branch: %s\n') % branch
5385 5390 if branch != 'default':
5386 5391 ui.write(m, label='log.branch')
5387 5392 else:
5388 5393 ui.status(m, label='log.branch')
5389 5394
5390 5395 if marks:
5391 5396 current = repo._bookmarkcurrent
5392 5397 # i18n: column positioning for "hg summary"
5393 5398 ui.write(_('bookmarks:'), label='log.bookmark')
5394 5399 if current is not None:
5395 5400 if current in marks:
5396 5401 ui.write(' *' + current, label='bookmarks.current')
5397 5402 marks.remove(current)
5398 5403 else:
5399 5404 ui.write(' [%s]' % current, label='bookmarks.current')
5400 5405 for m in marks:
5401 5406 ui.write(' ' + m, label='log.bookmark')
5402 5407 ui.write('\n', label='log.bookmark')
5403 5408
5404 5409 st = list(repo.status(unknown=True))[:6]
5405 5410
5406 5411 c = repo.dirstate.copies()
5407 5412 copied, renamed = [], []
5408 5413 for d, s in c.iteritems():
5409 5414 if s in st[2]:
5410 5415 st[2].remove(s)
5411 5416 renamed.append(d)
5412 5417 else:
5413 5418 copied.append(d)
5414 5419 if d in st[1]:
5415 5420 st[1].remove(d)
5416 5421 st.insert(3, renamed)
5417 5422 st.insert(4, copied)
5418 5423
5419 5424 ms = mergemod.mergestate(repo)
5420 5425 st.append([f for f in ms if ms[f] == 'u'])
5421 5426
5422 5427 subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
5423 5428 st.append(subs)
5424 5429
5425 5430 labels = [ui.label(_('%d modified'), 'status.modified'),
5426 5431 ui.label(_('%d added'), 'status.added'),
5427 5432 ui.label(_('%d removed'), 'status.removed'),
5428 5433 ui.label(_('%d renamed'), 'status.copied'),
5429 5434 ui.label(_('%d copied'), 'status.copied'),
5430 5435 ui.label(_('%d deleted'), 'status.deleted'),
5431 5436 ui.label(_('%d unknown'), 'status.unknown'),
5432 5437 ui.label(_('%d ignored'), 'status.ignored'),
5433 5438 ui.label(_('%d unresolved'), 'resolve.unresolved'),
5434 5439 ui.label(_('%d subrepos'), 'status.modified')]
5435 5440 t = []
5436 5441 for s, l in zip(st, labels):
5437 5442 if s:
5438 5443 t.append(l % len(s))
5439 5444
5440 5445 t = ', '.join(t)
5441 5446 cleanworkdir = False
5442 5447
5443 5448 if len(parents) > 1:
5444 5449 t += _(' (merge)')
5445 5450 elif branch != parents[0].branch():
5446 5451 t += _(' (new branch)')
5447 5452 elif (parents[0].closesbranch() and
5448 5453 pnode in repo.branchheads(branch, closed=True)):
5449 5454 t += _(' (head closed)')
5450 5455 elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]):
5451 5456 t += _(' (clean)')
5452 5457 cleanworkdir = True
5453 5458 elif pnode not in bheads:
5454 5459 t += _(' (new branch head)')
5455 5460
5456 5461 if cleanworkdir:
5457 5462 # i18n: column positioning for "hg summary"
5458 5463 ui.status(_('commit: %s\n') % t.strip())
5459 5464 else:
5460 5465 # i18n: column positioning for "hg summary"
5461 5466 ui.write(_('commit: %s\n') % t.strip())
5462 5467
5463 5468 # all ancestors of branch heads - all ancestors of parent = new csets
5464 5469 new = [0] * len(repo)
5465 5470 cl = repo.changelog
5466 5471 for a in [cl.rev(n) for n in bheads]:
5467 5472 new[a] = 1
5468 5473 for a in cl.ancestors([cl.rev(n) for n in bheads]):
5469 5474 new[a] = 1
5470 5475 for a in [p.rev() for p in parents]:
5471 5476 if a >= 0:
5472 5477 new[a] = 0
5473 5478 for a in cl.ancestors([p.rev() for p in parents]):
5474 5479 new[a] = 0
5475 5480 new = sum(new)
5476 5481
5477 5482 if new == 0:
5478 5483 # i18n: column positioning for "hg summary"
5479 5484 ui.status(_('update: (current)\n'))
5480 5485 elif pnode not in bheads:
5481 5486 # i18n: column positioning for "hg summary"
5482 5487 ui.write(_('update: %d new changesets (update)\n') % new)
5483 5488 else:
5484 5489 # i18n: column positioning for "hg summary"
5485 5490 ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
5486 5491 (new, len(bheads)))
5487 5492
5488 5493 if opts.get('remote'):
5489 5494 t = []
5490 5495 source, branches = hg.parseurl(ui.expandpath('default'))
5491 5496 sbranch = branches[0]
5492 5497 other = hg.peer(repo, {}, source)
5493 5498 revs, checkout = hg.addbranchrevs(repo, other, branches,
5494 5499 opts.get('rev'))
5495 5500 if revs:
5496 5501 revs = [other.lookup(rev) for rev in revs]
5497 5502 ui.debug('comparing with %s\n' % util.hidepassword(source))
5498 5503 repo.ui.pushbuffer()
5499 5504 commoninc = discovery.findcommonincoming(repo, other, heads=revs)
5500 5505 _common, incoming, _rheads = commoninc
5501 5506 repo.ui.popbuffer()
5502 5507 if incoming:
5503 5508 t.append(_('1 or more incoming'))
5504 5509
5505 5510 dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
5506 5511 dbranch = branches[0]
5507 5512 revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
5508 5513 if source != dest:
5509 5514 other = hg.peer(repo, {}, dest)
5510 5515 ui.debug('comparing with %s\n' % util.hidepassword(dest))
5511 5516 if (source != dest or (sbranch is not None and sbranch != dbranch)):
5512 5517 commoninc = None
5513 5518 if revs:
5514 5519 revs = [repo.lookup(rev) for rev in revs]
5515 5520 repo.ui.pushbuffer()
5516 5521 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs,
5517 5522 commoninc=commoninc)
5518 5523 repo.ui.popbuffer()
5519 5524 o = outgoing.missing
5520 5525 if o:
5521 5526 t.append(_('%d outgoing') % len(o))
5522 5527 if 'bookmarks' in other.listkeys('namespaces'):
5523 5528 lmarks = repo.listkeys('bookmarks')
5524 5529 rmarks = other.listkeys('bookmarks')
5525 5530 diff = set(rmarks) - set(lmarks)
5526 5531 if len(diff) > 0:
5527 5532 t.append(_('%d incoming bookmarks') % len(diff))
5528 5533 diff = set(lmarks) - set(rmarks)
5529 5534 if len(diff) > 0:
5530 5535 t.append(_('%d outgoing bookmarks') % len(diff))
5531 5536
5532 5537 if t:
5533 5538 # i18n: column positioning for "hg summary"
5534 5539 ui.write(_('remote: %s\n') % (', '.join(t)))
5535 5540 else:
5536 5541 # i18n: column positioning for "hg summary"
5537 5542 ui.status(_('remote: (synced)\n'))
5538 5543
@command('tag',
    [('f', 'force', None, _('force tag')),
    ('l', 'local', None, _('make the tag local')),
    ('r', 'rev', '', _('revision to tag'), _('REV')),
    ('', 'remove', None, _('remove a tag')),
    # -l/--local is already there, commitopts cannot be used
    ('e', 'edit', None, _('edit commit message')),
    ('m', 'message', '', _('use <text> as commit message'), _('TEXT')),
    ] + commitopts2,
    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc. Changing
    an existing tag is normally disallowed; use -f/--force to override.

    If no revision is given, the parent of the working directory is
    used, or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed similarly
    to other project files and can be hand-edited if necessary. This
    also means that tagging creates a new commit. The file
    ".hg/localtags" is used for local tags (not shared among
    repositories).

    Tag commits are usually made at the head of a branch. If the parent
    of the working directory is not a branch head, :hg:`tag` aborts; use
    -f/--force to force the tag commit to be based on a non-head
    changeset.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Since tag names have priority over branch names during revision
    lookup, using an existing branch name as a tag name is discouraged.

    Returns 0 on success.
    """
    wlock = lock = None
    try:
        # wlock (working dir) is taken before lock (store) — hg lock order
        wlock = repo.wlock()
        lock = repo.lock()
        rev_ = "."  # default: tag the working directory parent
        names = [t.strip() for t in (name1,) + names]
        if len(names) != len(set(names)):
            raise util.Abort(_('tag names must be unique'))
        for n in names:
            scmutil.checknewlabel(repo, n, 'tag')
            if not n:
                raise util.Abort(_('tag names cannot consist entirely of '
                    'whitespace'))
        if opts.get('rev') and opts.get('remove'):
            raise util.Abort(_("--rev and --remove are incompatible"))
        if opts.get('rev'):
            rev_ = opts['rev']
        message = opts.get('message')
        if opts.get('remove'):
            # a removal must target a tag of the matching scope (local/global)
            expectedtype = opts.get('local') and 'local' or 'global'
            for n in names:
                if not repo.tagtype(n):
                    raise util.Abort(_("tag '%s' does not exist") % n)
                if repo.tagtype(n) != expectedtype:
                    if expectedtype == 'global':
                        raise util.Abort(_("tag '%s' is not a global tag") % n)
                    else:
                        raise util.Abort(_("tag '%s' is not a local tag") % n)
            # removal is recorded by re-tagging the name to the null node
            rev_ = nullid
            if not message:
                # we don't translate commit messages
                message = 'Removed tag %s' % ', '.join(names)
        elif not opts.get('force'):
            for n in names:
                if n in repo.tags():
                    raise util.Abort(_("tag '%s' already exists "
                                       "(use -f to force)") % n)
        if not opts.get('local'):
            # a global tag creates a commit, so the usual commit
            # preconditions apply (no pending merge, at a branch head)
            p1, p2 = repo.dirstate.parents()
            if p2 != nullid:
                raise util.Abort(_('uncommitted merge'))
            bheads = repo.branchheads()
            if not opts.get('force') and bheads and p1 not in bheads:
                raise util.Abort(_('not at a branch head (use -f to force)'))
        r = scmutil.revsingle(repo, rev_).node()

        if not message:
            # we don't translate commit messages
            message = ('Added tag %s for changeset %s' %
                       (', '.join(names), short(r)))

        date = opts.get('date')
        if date:
            date = util.parsedate(date)

        if opts.get('edit'):
            message = ui.edit(message, ui.username())

        # don't allow tagging the null rev
        if (not opts.get('remove') and
            scmutil.revsingle(repo, rev_).rev() == nullrev):
            raise util.Abort(_("cannot tag null revision"))

        repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
    finally:
        release(lock, wlock)
5647 5652
@command('tags', [], '')
def tags(ui, repo, **opts):
    """list repository tags

    This lists both regular and local tags. When the -v/--verbose
    switch is used, a third column "local" is printed for local tags.

    Returns 0 on success.
    """

    fm = ui.formatter('tags', opts)
    hexfunc = ui.debugflag and hex or short

    # tagslist() is oldest-first; display newest-first
    for name, node in reversed(repo.tagslist()):
        if repo.tagtype(name) == 'local':
            label, tagtype = 'tags.local', 'local'
        else:
            label, tagtype = 'tags.normal', ''

        fm.startitem()
        fm.write('tag', '%s', name, label=label)
        # pad the name so rev:node columns line up (colwidth handles
        # wide characters)
        fmt = " " * (30 - encoding.colwidth(name)) + ' %5d:%s'
        fm.condwrite(not ui.quiet, 'rev id', fmt,
                     repo.changelog.rev(node), hexfunc(node), label=label)
        fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
                     tagtype, label=label)
        fm.plain('\n')
    fm.end()
5679 5684
@command('tip',
    [('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ] + templateopts,
    _('[-p] [-g]'))
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.

    Returns 0 on success.
    """
    # render the single tipmost changeset with the standard log displayer
    displayer = cmdutil.show_changeset(ui, repo, opts)
    tipctx = repo['tip']
    displayer.show(tipctx)
    displayer.close()
5702 5707
@command('unbundle',
    [('u', 'update', None,
     _('update to new branch head if changesets were unbundled'))],
    _('[-u] FILE...'))
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.

    Returns 0 on success, 1 if an update has unresolved files.
    """
    fnames = (fname1,) + fnames

    lock = repo.lock()
    # remember the pre-unbundle working dir parent so the current
    # bookmark can be moved afterwards
    wc = repo['.']
    try:
        for fname in fnames:
            f = hg.openpath(ui, fname)
            gen = changegroup.readbundle(f, fname)
            # NOTE(review): only the modheads of the *last* bundle is kept
            # and passed to postincoming below — presumably acceptable for
            # the common single-file case; confirm for multi-file invocations
            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
    finally:
        lock.release()
    bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
    return postincoming(ui, repo, modheads, opts.get('update'), None)
5728 5733
@command('^update|up|checkout|co',
    [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
    ('c', 'check', None,
     _('update across branches if no uncommitted changes')),
    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
    ('r', 'rev', '', _('revision'), _('REV'))],
    _('[-c] [-C] [-d DATE] [[-r] REV]'))
def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
    """update working directory (or switch revisions)

    Update the repository's working directory to the specified
    changeset. If no changeset is specified, update to the tip of the
    current named branch and move the current bookmark (see :hg:`help
    bookmarks`).

    Update sets the working directory's parent revision to the specified
    changeset (see :hg:`help parents`).

    If the changeset is not a descendant or ancestor of the working
    directory's parent, the update is aborted. With the -c/--check
    option, the working directory is checked for uncommitted changes; if
    none are found, the working directory is updated to the specified
    changeset.

    .. container:: verbose

      The following rules apply when the working directory contains
      uncommitted changes:

      1. If neither -c/--check nor -C/--clean is specified, and if
         the requested changeset is an ancestor or descendant of
         the working directory's parent, the uncommitted changes
         are merged into the requested changeset and the merged
         result is left uncommitted. If the requested changeset is
         not an ancestor or descendant (that is, it is on another
         branch), the update is aborted and the uncommitted changes
         are preserved.

      2. With the -c/--check option, the update is aborted and the
         uncommitted changes are preserved.

      3. With the -C/--clean option, uncommitted changes are discarded and
         the working directory is updated to the requested changeset.

    To cancel an uncommitted merge (and lose your changes), use
    :hg:`update --clean .`.

    Use null as the changeset to remove the working directory (like
    :hg:`clone -U`).

    If you want to revert just one file to an older revision, use
    :hg:`revert [-r REV] NAME`.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if there are unresolved files.
    """
    # `node` is the positional REV argument, `rev` the -r/--rev option;
    # they are mutually exclusive and folded into `rev` below
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if rev is None or rev == '':
        rev = node

    # with no argument, we also move the current bookmark, if any
    movemarkfrom = None
    if rev is None:
        curmark = repo._bookmarkcurrent
        if bookmarks.iscurrent(repo):
            movemarkfrom = repo['.'].node()
        elif curmark:
            ui.status(_("updating to active bookmark %s\n") % curmark)
            rev = curmark

    # if we defined a bookmark, we have to remember the original bookmark name
    brev = rev
    rev = scmutil.revsingle(repo, rev, rev).rev()

    if check and clean:
        raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))

    if date:
        if rev is not None:
            raise util.Abort(_("you can't specify a revision and a date"))
        rev = cmdutil.finddate(ui, repo, date)

    if check:
        # abort early rather than merging uncommitted changes
        c = repo[None]
        if c.dirty(merge=False, branch=False, missing=True):
            raise util.Abort(_("uncommitted local changes"))
        if rev is None:
            rev = repo[repo[None].branch()].rev()
        mergemod._checkunknown(repo, repo[None], repo[rev])

    if clean:
        ret = hg.clean(repo, rev)
    else:
        ret = hg.update(repo, rev)

    # on success, either drag the active bookmark along, activate the
    # bookmark we updated to, or deactivate any current bookmark
    if not ret and movemarkfrom:
        if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
            ui.status(_("updating bookmark %s\n") % repo._bookmarkcurrent)
    elif brev in repo._bookmarks:
        bookmarks.setcurrent(repo, brev)
    elif brev:
        bookmarks.unsetcurrent(repo)

    return ret
5836 5841
@command('verify', [])
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.

    Please see http://mercurial.selenic.com/wiki/RepositoryCorruption
    for more information about recovery from corruption of the
    repository.

    Returns 0 on success, 1 if errors are encountered.
    """
    # thin command wrapper: all verification logic lives in hg.verify()
    return hg.verify(repo)
5855 5860
@command('version', [])
def version_(ui):
    """output version and copyright information"""
    # the version line is always written; the copyright blurb goes
    # through ui.status() so --quiet suppresses it
    ui.write(_("Mercurial Distributed SCM (version %s)\n") % util.version())
    ui.status(_(
        "(see http://mercurial.selenic.com for more information)\n"
        "\nCopyright (C) 2005-2012 Matt Mackall and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    ))
5868 5873
# space-separated lists consumed by the dispatcher:
# commands that never need a repository
norepo = ("clone init version help debugcommands debugcomplete"
          " debugdate debuginstall debugfsinfo debugpushkey debugwireargs"
          " debugknown debuggetbundle debugbundle")
# commands for which a repository is optional
optionalrepo = ("identify paths serve showconfig debugancestor debugdag"
                " debugdata debugindex debugindexdot debugrevlog")
# commands that may infer the repository from their file arguments
inferrepo = ("add addremove annotate cat commit diff grep forget log parents"
             " remove resolve status debugwalk")
@@ -1,2613 +1,2618 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # each descriptor operation is redirected to the unfiltered repo so
    # that all repoview filters share a single cached value
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve cached filenames relative to .hg/store instead of .hg
        return obj.sjoin(fname)
37 37
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        # always compute and cache on the unfiltered repo
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store the computed value directly on the (possibly filtered)
        # instance, bypassing any __setattr__ override
        object.__setattr__(obj, self.name, value)
49 49
50 50
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
54 54
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered twin
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
60 60
# capabilities advertised by localpeer (most recent API only)
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
# legacy peers additionally speak the old changegroupsubset protocol
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        # NOTE: `caps` defaults to a shared module-level set; it is only
        # read here, never mutated.
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        # BUG FIX: the caller's bundlecaps must be forwarded; the previous
        # code passed the literal None, silently discarding them.
        return self._repo.getbundle(source, heads=heads, common=common,
                                    bundlecaps=bundlecaps)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
123 124
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # same as localpeer but advertises the legacy capability set
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    # the following methods exist only in the pre-getbundle wire protocol
    # and delegate straight to the underlying repo

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)
142 143
143 144 class localrepository(object):
144 145
    # on-disk format requirements this class can read/write
    supportedformats = set(('revlogv1', 'generaldelta'))
    # all requirements this class understands (formats + layout features)
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    # requirements that translate into revlog opener options
    openerreqs = set(('revlogv1', 'generaldelta'))
    # base requirements for newly created repositories
    requirements = ['revlogv1']
    # name of the active repoview filter; None means unfiltered
    filtername = None
151 152
152 153 def _baserequirements(self, create):
153 154 return self.requirements[:]
154 155
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at path."""
        # working-directory vfs rooted at the checkout
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        # .hg/ vfs
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # missing .hg/hgrc is fine; best-effort config load
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                # layout features nest: fncache needs store, dotencode
                # needs fncache
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # pre-requirements-era repository
                requirements = set()

        # honor a .hg/sharedpath redirection (shared repositories)
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
253 254
    def close(self):
        # intentionally a no-op for a local repository; part of the peer
        # API so callers can close any repo/peer uniformly
        pass
256 257
    def _restrictcapabilities(self, caps):
        # hook point: subclasses/extensions may trim the advertised
        # capability set; the base class passes it through unchanged
        return caps
259 260
260 261 def _applyrequirements(self, requirements):
261 262 self.requirements = requirements
262 263 self.sopener.options = dict((r, 1) for r in requirements
263 264 if r in self.openerreqs)
264 265
265 266 def _writerequirements(self):
266 267 reqfile = self.opener("requires", "w")
267 268 for r in sorted(self.requirements):
268 269 reqfile.write("%s\n" % r)
269 270 reqfile.close()
270 271
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        `path` is an absolute filesystem path; returns True only when it
        falls under a subrepo declared in the working copy's .hgsub state.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # test progressively shorter prefixes of the path against the
            # declared subrepositories
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # nested deeper than one level: delegate to the subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
308 309
    def peer(self):
        """Return a localpeer view of this repository."""
        return localpeer(self) # not cached to avoid reference cycle
311 312
312 313 def unfiltered(self):
313 314 """Return unfiltered version of the repository
314 315
315 316 Intended to be overwritten by filtered repo."""
316 317 return self
317 318
    def filtered(self, name):
        """Return a filtered version of a repository

        `name` identifies the repoview filter (e.g. 'served', 'visible').
        """
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
325 326
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark store, cached until .hg/bookmarks changes on disk
        return bookmarks.bmstore(self)
329 330
    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark, cached on .hg/bookmarks.current
        return bookmarks.readcurrent(self)
333 334
334 335 def bookmarkheads(self, bookmark):
335 336 name = bookmark.split('@', 1)[0]
336 337 heads = []
337 338 for mark, n in self._bookmarks.iteritems():
338 339 if mark.split('@', 1)[0] == name:
339 340 heads.append(n)
340 341 return heads
341 342
    @storecache('phaseroots')
    def _phasecache(self):
        # phase information, cached on the store's phaseroots file
        return phases.phasecache(self, self._phasedefaults)
345 346
    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        # warn when markers exist but the (experimental) feature is off
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store
354 355
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        # when a hook runs with a pending transaction, HG_PENDING points at
        # the repo root and we must expose the not-yet-committed revisions
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
363 364
    @storecache('00manifest.i')
    def manifest(self):
        # manifest revlog, cached on the store's 00manifest.i file
        return manifest.manifest(self.sopener)
367 368
    @repofilecache('dirstate')
    def dirstate(self):
        # single-element list: py2-compatible mutable closure cell so
        # validate() warns at most once
        warned = [0]
        def validate(node):
            # map working-dir parents unknown to the changelog to nullid
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
383 384
384 385 def __getitem__(self, changeid):
385 386 if changeid is None:
386 387 return context.workingctx(self)
387 388 return context.changectx(self, changeid)
388 389
389 390 def __contains__(self, changeid):
390 391 try:
391 392 return bool(self.lookup(changeid))
392 393 except error.RepoLookupError:
393 394 return False
394 395
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no
        # revisions (len() == 0)
        return True
397 398
    def __len__(self):
        # number of revisions == length of the changelog
        return len(self.changelog)
400 401
    def __iter__(self):
        # iterate over revision numbers in changelog order
        return iter(self.changelog)
403 404
404 405 def revs(self, expr, *args):
405 406 '''Return a list of revisions matching the given revset'''
406 407 expr = revset.formatspec(expr, *args)
407 408 m = revset.match(None, expr)
408 409 return [r for r in m(self, list(self))]
409 410
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        # lazy: contexts are created one at a time as the caller iterates
        for r in self.revs(expr, *args):
            yield self[r]
417 418
    def url(self):
        """Return this repository's URL ('file:' + repository root)."""
        return 'file:' + self.root
420 421
    def hook(self, name, throw=False, **args):
        """Run the configured hooks for *name*; see hook.hook()."""
        return hook.hook(self.ui, self, name, throw, **args)
423 424
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Record tag(s) *names* for *node*; implementation behind tag().

        Local tags go to .hg/localtags; global tags are appended to .hgtags
        and committed. Returns the tagging changeset node for global tags,
        None (implicitly) for local ones.
        NOTE(review): the mutable default ``extra={}`` is only passed through
        to commit(), never mutated here.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag records, making sure the file ends with a newline
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # tag already exists: record the old node first so
                    # history of the tag is preserved in the file
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
491 492
492 493 def tag(self, names, node, message, local, user, date):
493 494 '''tag a revision with one or more symbolic names.
494 495
495 496 names is a list of strings or, when adding a single tag, names may be a
496 497 string.
497 498
498 499 if local is True, the tags are stored in a per-repository file.
499 500 otherwise, they are stored in the .hgtags file, and a new
500 501 changeset is committed with the change.
501 502
502 503 keyword arguments:
503 504
504 505 local: whether to store tags in non-version-controlled file
505 506 (default False)
506 507
507 508 message: commit message to use if committing
508 509
509 510 user: name of user to use if committing
510 511
511 512 date: date tuple to use if committing'''
512 513
513 514 if not local:
514 515 for x in self.status()[:5]:
515 516 if '.hgtags' in x:
516 517 raise util.Abort(_('working copy of .hgtags is changed '
517 518 '(please commit .hgtags manually)'))
518 519
519 520 self.tags() # instantiate the cache
520 521 self._tag(names, node, message, local, user, date)
521 522
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist() and nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
544 545
545 546 def tags(self):
546 547 '''return a mapping of tag to node'''
547 548 t = {}
548 549 if self.changelog.filteredrevs:
549 550 tags, tt = self._findtags()
550 551 else:
551 552 tags = self._tagscache.tags
552 553 for k, v in tags.iteritems():
553 554 try:
554 555 # ignore tags to unknown nodes
555 556 self.changelog.rev(v)
556 557 t[k] = v
557 558 except (error.LookupError, ValueError):
558 559 pass
559 560 return t
560 561
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        # global tags first, then local ones may override them
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
593 594
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
604 605
605 606 def tagslist(self):
606 607 '''return a list of tags ordered by revision'''
607 608 if not self._tagscache.tagslist:
608 609 l = []
609 610 for t, n in self.tags().iteritems():
610 611 r = self.changelog.rev(n)
611 612 l.append((r, t, n))
612 613 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
613 614
614 615 return self._tagscache.tagslist
615 616
616 617 def nodetags(self, node):
617 618 '''return the tags associated with a node'''
618 619 if not self._tagscache.nodetagscache:
619 620 nodetagscache = {}
620 621 for t, n in self._tagscache.tags.iteritems():
621 622 nodetagscache.setdefault(n, []).append(t)
622 623 for tags in nodetagscache.itervalues():
623 624 tags.sort()
624 625 self._tagscache.nodetagscache = nodetagscache
625 626 return self._tagscache.nodetagscache.get(node, [])
626 627
627 628 def nodebookmarks(self, node):
628 629 marks = []
629 630 for bookmark, n in self._bookmarks.iteritems():
630 631 if n == node:
631 632 marks.append(bookmark)
632 633 return sorted(marks)
633 634
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # refresh the cache for this repo's filter level before returning it
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
638 639
639 640
640 641 def _branchtip(self, heads):
641 642 '''return the tipmost branch head in heads'''
642 643 tip = heads[-1]
643 644 for h in reversed(heads):
644 645 if not self[h].closesbranch():
645 646 tip = h
646 647 break
647 648 return tip
648 649
    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])
654 655
655 656 def branchtags(self):
656 657 '''return a dict where branch names map to the tipmost head of
657 658 the branch, open heads come before closed'''
658 659 bt = {}
659 660 for bn, heads in self.branchmap().iteritems():
660 661 bt[bn] = self._branchtip(heads)
661 662 return bt
662 663
    def lookup(self, key):
        """Resolve *key* (rev, node, tag, ...) to a binary node."""
        return self[key].node()
665 666
    def lookupbranch(self, key, remote=None):
        """Return the branch name *key* refers to.

        If *key* names an existing branch (in *remote* when given, else
        here), return it unchanged; otherwise resolve *key* as a changeset
        and return that changeset's branch.
        """
        repo = remote or self
        if key in repo.branchmap():
            return key

        # prefer the local view of the remote when one is available
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
673 674
674 675 def known(self, nodes):
675 676 nm = self.changelog.nodemap
676 677 pc = self._phasecache
677 678 result = []
678 679 for n in nodes:
679 680 r = nm.get(n)
680 681 resp = not (r is None or pc.phase(self, r) >= phases.secret)
681 682 result.append(resp)
682 683 return result
683 684
    def local(self):
        """Return self, marking this repository as local."""
        return self
686 687
    def cancopy(self):
        """True if this repository can be copied (i.e. is truly local)."""
        return self.local() # so statichttprepo's override of local() works
689 690
    def join(self, f):
        """Join *f* to the repository metadata path (self.path)."""
        return os.path.join(self.path, f)
692 693
    def wjoin(self, f):
        """Join *f* to the working directory root (self.root)."""
        return os.path.join(self.root, f)
695 696
    def file(self, f):
        """Return the filelog for tracked file *f*.

        A single leading '/' is stripped from the name.
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
700 701
    def changectx(self, changeid):
        """Return the changectx for *changeid* (alias for repo[changeid])."""
        return self[changeid]
703 704
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
707 708
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents and fix up copy records."""
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # with a single parent, drop copy records whose source and
            # destination are both unknown to that parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
722 723
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
727 728
    def getcwd(self):
        """Return the current working directory, dirstate-relative."""
        return self.dirstate.getcwd()
730 731
    def pathto(self, f, cwd=None):
        """Return the path to *f* relative to *cwd* (delegates to dirstate)."""
        return self.dirstate.pathto(f, cwd)
733 734
    def wfile(self, f, mode='r'):
        """Open working directory file *f* with *mode*."""
        return self.wopener(f, mode)
736 737
    def _link(self, f):
        """True if working directory file *f* is a symlink."""
        return self.wvfs.islink(f)
739 740
    def _loadfilter(self, filter):
        """Load and cache the configured patterns for the *filter* section
        ('encode' or 'decode').

        Returns a list of (matcher, filterfn, params) triples.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables a pattern inherited from another config file
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command prefixed with a registered data-filter name runs
                # that in-process filter instead of an external command
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
763 764
764 765 def _filter(self, filterpats, filename, data):
765 766 for mf, fn, cmd in filterpats:
766 767 if mf(filename):
767 768 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
768 769 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
769 770 break
770 771
771 772 return data
772 773
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # lazily-loaded [encode] filter patterns (see _loadfilter)
        return self._loadfilter('encode')
776 777
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # lazily-loaded [decode] filter patterns (see _loadfilter)
        return self._loadfilter('decode')
780 781
    def adddatafilter(self, name, filter):
        """Register in-process data filter *filter* under *name*."""
        self._datafilters[name] = filter
783 784
784 785 def wread(self, filename):
785 786 if self._link(filename):
786 787 data = self.wvfs.readlink(filename)
787 788 else:
788 789 data = self.wopener.read(filename)
789 790 return self._filter(self._encodefilterpats, filename, data)
790 791
    def wwrite(self, filename, data, flags):
        """Write *data* to the working directory after decode filters.

        'l' in *flags* creates a symlink to data; 'x' sets the exec bit.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
799 800
    def wwritedata(self, filename, data):
        """Return *data* run through the decode filters for *filename*."""
        return self._filter(self._decodefilterpats, filename, data)
802 803
    def transaction(self, desc):
        """Open a store transaction described by *desc*, or nest into a
        running one. Returns the transaction object."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot current state so it can be restored on rollback
        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # only a weakref is kept: an otherwise-unreferenced transaction
        # can be garbage collected (and aborted)
        self._transref = weakref.ref(tr)
        return tr
822 823
    def _journalfiles(self):
        # (vfs, name) pairs for every file snapshotted by a transaction
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
830 831
831 832 def undofiles(self):
832 833 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
833 834
    def _writejournal(self, desc):
        """Snapshot dirstate/branch/desc/bookmarks/phaseroots for rollback."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # length of the repo plus the transaction description
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
845 846
    def recover(self):
        """Roll back an interrupted transaction.

        Returns True when a journal was found and rolled back, else False.
        """
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
860 861
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction if undo data exists; see _rollback.

        Returns 1 when there is nothing to roll back.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
873 874
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Implementation of rollback(): restore pre-transaction state.

        Returns 0 on success (or dry run); raises util.Abort when a commit
        rollback might lose data and *force* is not set.
        """
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore dirstate/branch when a working parent disappeared
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
939 940
    def invalidatecaches(self):
        """Drop in-memory caches: tags, branch caches and volatile sets."""

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
948 949
    def invalidatevolatilesets(self):
        """Drop filtered-revision and obsolescence caches."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
952 953
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next access to check
        whether it was modified since the last time it was read, and to
        reread it if it has been.

        This differs from dirstate.invalidate() in that it does not always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the dirstate's own file caches before dropping ours
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
969 970
    def invalidate(self):
        """Drop all file-backed caches except the dirstate."""
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
982 983
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire *lockname*, retrying with a timeout when *wait* is set.

        *releasefn* runs on release, *acquirefn* right after acquisition.
        """
        try:
            # first attempt is non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
997 998
998 999 def _afterlock(self, callback):
999 1000 """add a callback to the current repository lock.
1000 1001
1001 1002 The callback will be executed on lock release."""
1002 1003 l = self._lockref and self._lockref()
1003 1004 if l:
1004 1005 l.postrelease.append(callback)
1005 1006 else:
1006 1007 callback()
1007 1008
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-entrant acquisition: bump the hold count
            l.lock()
            return l

        def unlock():
            # flush store-level state before the lock goes away
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1030 1031
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant acquisition: bump the hold count
            l.lock()
            return l

        def unlock():
            # persist the dirstate before the lock goes away
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1049 1050
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Appends changed filenames to *changelist* and returns the new
        filenode (or the unchanged first parent node).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1129 1130
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.
        NOTE(review): the mutable default ``extra={}`` is read but never
        mutated here; it is forwarded to workingctx.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit patterns can be
            # validated against them below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            # bail out on an empty, non-forced, non-closing, same-branch commit
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                      "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1297 1298
1298 1299 @unfilteredmethod
1299 1300 def commitctx(self, ctx, error=False):
1300 1301 """Add a new revision to current repository.
1301 1302 Revision information is passed via the context argument.
1302 1303 """
1303 1304
1304 1305 tr = lock = None
1305 1306 removed = list(ctx.removed())
1306 1307 p1, p2 = ctx.p1(), ctx.p2()
1307 1308 user = ctx.user()
1308 1309
1309 1310 lock = self.lock()
1310 1311 try:
1311 1312 tr = self.transaction("commit")
1312 1313 trp = weakref.proxy(tr)
1313 1314
1314 1315 if ctx.files():
1315 1316 m1 = p1.manifest().copy()
1316 1317 m2 = p2.manifest()
1317 1318
1318 1319 # check in files
1319 1320 new = {}
1320 1321 changed = []
1321 1322 linkrev = len(self)
1322 1323 for f in sorted(ctx.modified() + ctx.added()):
1323 1324 self.ui.note(f + "\n")
1324 1325 try:
1325 1326 fctx = ctx[f]
1326 1327 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1327 1328 changed)
1328 1329 m1.set(f, fctx.flags())
1329 1330 except OSError, inst:
1330 1331 self.ui.warn(_("trouble committing %s!\n") % f)
1331 1332 raise
1332 1333 except IOError, inst:
1333 1334 errcode = getattr(inst, 'errno', errno.ENOENT)
1334 1335 if error or errcode and errcode != errno.ENOENT:
1335 1336 self.ui.warn(_("trouble committing %s!\n") % f)
1336 1337 raise
1337 1338 else:
1338 1339 removed.append(f)
1339 1340
1340 1341 # update manifest
1341 1342 m1.update(new)
1342 1343 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1343 1344 drop = [f for f in removed if f in m1]
1344 1345 for f in drop:
1345 1346 del m1[f]
1346 1347 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1347 1348 p2.manifestnode(), (new, drop))
1348 1349 files = changed + removed
1349 1350 else:
1350 1351 mn = p1.manifestnode()
1351 1352 files = []
1352 1353
1353 1354 # update changelog
1354 1355 self.changelog.delayupdate()
1355 1356 n = self.changelog.add(mn, files, ctx.description(),
1356 1357 trp, p1.node(), p2.node(),
1357 1358 user, ctx.date(), ctx.extra().copy())
1358 1359 p = lambda: self.changelog.writepending() and self.root or ""
1359 1360 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1360 1361 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1361 1362 parent2=xp2, pending=p)
1362 1363 self.changelog.finalize(trp)
1363 1364 # set the new commit is proper phase
1364 1365 targetphase = phases.newcommitphase(self.ui)
1365 1366 if targetphase:
1366 1367 # retract boundary do not alter parent changeset.
1367 1368 # if a parent have higher the resulting phase will
1368 1369 # be compliant anyway
1369 1370 #
1370 1371 # if minimal phase was 0 we don't need to retract anything
1371 1372 phases.retractboundary(self, targetphase, [n])
1372 1373 tr.close()
1373 1374 branchmap.updatecache(self.filtered('served'))
1374 1375 return n
1375 1376 finally:
1376 1377 if tr:
1377 1378 tr.release()
1378 1379 lock.release()
1379 1380
1380 1381 @unfilteredmethod
1381 1382 def destroying(self):
1382 1383 '''Inform the repository that nodes are about to be destroyed.
1383 1384 Intended for use by strip and rollback, so there's a common
1384 1385 place for anything that has to be done before destroying history.
1385 1386
1386 1387 This is mostly useful for saving state that is in memory and waiting
1387 1388 to be flushed when the current lock is released. Because a call to
1388 1389 destroyed is imminent, the repo will be invalidated causing those
1389 1390 changes to stay in memory (waiting for the next unlock), or vanish
1390 1391 completely.
1391 1392 '''
1392 1393 # When using the same lock to commit and strip, the phasecache is left
1393 1394 # dirty after committing. Then when we strip, the repo is invalidated,
1394 1395 # causing those changes to disappear.
1395 1396 if '_phasecache' in vars(self):
1396 1397 self._phasecache.write()
1397 1398
1398 1399 @unfilteredmethod
1399 1400 def destroyed(self):
1400 1401 '''Inform the repository that nodes have been destroyed.
1401 1402 Intended for use by strip and rollback, so there's a common
1402 1403 place for anything that has to be done after destroying history.
1403 1404 '''
1404 1405 # When one tries to:
1405 1406 # 1) destroy nodes thus calling this method (e.g. strip)
1406 1407 # 2) use phasecache somewhere (e.g. commit)
1407 1408 #
1408 1409 # then 2) will fail because the phasecache contains nodes that were
1409 1410 # removed. We can either remove phasecache from the filecache,
1410 1411 # causing it to reload next time it is accessed, or simply filter
1411 1412 # the removed nodes now and write the updated cache.
1412 1413 self._phasecache.filterunknown(self)
1413 1414 self._phasecache.write()
1414 1415
1415 1416 # update the 'served' branch cache to help read only server process
1416 1417 # Thanks to branchcache collaboration this is done from the nearest
1417 1418 # filtered subset and it is expected to be fast.
1418 1419 branchmap.updatecache(self.filtered('served'))
1419 1420
1420 1421 # Ensure the persistent tag cache is updated. Doing it now
1421 1422 # means that the tag cache only has to worry about destroyed
1422 1423 # heads immediately after a strip/rollback. That in turn
1423 1424 # guarantees that "cachetip == currenttip" (comparing both rev
1424 1425 # and node) always means no nodes have been added or destroyed.
1425 1426
1426 1427 # XXX this is suboptimal when qrefresh'ing: we strip the current
1427 1428 # head, refresh the tag cache, then immediately add a new head.
1428 1429 # But I think doing it this way is necessary for the "instant
1429 1430 # tag cache retrieval" case to work.
1430 1431 self.invalidate()
1431 1432
1432 1433 def walk(self, match, node=None):
1433 1434 '''
1434 1435 walk recursively through the directory tree or a given
1435 1436 changeset, finding all files matched by the match
1436 1437 function
1437 1438 '''
1438 1439 return self[node].walk(match)
1439 1440
1440 1441 def status(self, node1='.', node2=None, match=None,
1441 1442 ignored=False, clean=False, unknown=False,
1442 1443 listsubrepos=False):
1443 1444 """return status of files between two nodes or node and working
1444 1445 directory.
1445 1446
1446 1447 If node1 is None, use the first dirstate parent instead.
1447 1448 If node2 is None, compare node1 with working directory.
1448 1449 """
1449 1450
1450 1451 def mfmatches(ctx):
1451 1452 mf = ctx.manifest().copy()
1452 1453 if match.always():
1453 1454 return mf
1454 1455 for fn in mf.keys():
1455 1456 if not match(fn):
1456 1457 del mf[fn]
1457 1458 return mf
1458 1459
1459 1460 if isinstance(node1, context.changectx):
1460 1461 ctx1 = node1
1461 1462 else:
1462 1463 ctx1 = self[node1]
1463 1464 if isinstance(node2, context.changectx):
1464 1465 ctx2 = node2
1465 1466 else:
1466 1467 ctx2 = self[node2]
1467 1468
1468 1469 working = ctx2.rev() is None
1469 1470 parentworking = working and ctx1 == self['.']
1470 1471 match = match or matchmod.always(self.root, self.getcwd())
1471 1472 listignored, listclean, listunknown = ignored, clean, unknown
1472 1473
1473 1474 # load earliest manifest first for caching reasons
1474 1475 if not working and ctx2.rev() < ctx1.rev():
1475 1476 ctx2.manifest()
1476 1477
1477 1478 if not parentworking:
1478 1479 def bad(f, msg):
1479 1480 # 'f' may be a directory pattern from 'match.files()',
1480 1481 # so 'f not in ctx1' is not enough
1481 1482 if f not in ctx1 and f not in ctx1.dirs():
1482 1483 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1483 1484 match.bad = bad
1484 1485
1485 1486 if working: # we need to scan the working dir
1486 1487 subrepos = []
1487 1488 if '.hgsub' in self.dirstate:
1488 1489 subrepos = sorted(ctx2.substate)
1489 1490 s = self.dirstate.status(match, subrepos, listignored,
1490 1491 listclean, listunknown)
1491 1492 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1492 1493
1493 1494 # check for any possibly clean files
1494 1495 if parentworking and cmp:
1495 1496 fixup = []
1496 1497 # do a full compare of any files that might have changed
1497 1498 for f in sorted(cmp):
1498 1499 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1499 1500 or ctx1[f].cmp(ctx2[f])):
1500 1501 modified.append(f)
1501 1502 else:
1502 1503 fixup.append(f)
1503 1504
1504 1505 # update dirstate for files that are actually clean
1505 1506 if fixup:
1506 1507 if listclean:
1507 1508 clean += fixup
1508 1509
1509 1510 try:
1510 1511 # updating the dirstate is optional
1511 1512 # so we don't wait on the lock
1512 1513 wlock = self.wlock(False)
1513 1514 try:
1514 1515 for f in fixup:
1515 1516 self.dirstate.normal(f)
1516 1517 finally:
1517 1518 wlock.release()
1518 1519 except error.LockError:
1519 1520 pass
1520 1521
1521 1522 if not parentworking:
1522 1523 mf1 = mfmatches(ctx1)
1523 1524 if working:
1524 1525 # we are comparing working dir against non-parent
1525 1526 # generate a pseudo-manifest for the working dir
1526 1527 mf2 = mfmatches(self['.'])
1527 1528 for f in cmp + modified + added:
1528 1529 mf2[f] = None
1529 1530 mf2.set(f, ctx2.flags(f))
1530 1531 for f in removed:
1531 1532 if f in mf2:
1532 1533 del mf2[f]
1533 1534 else:
1534 1535 # we are comparing two revisions
1535 1536 deleted, unknown, ignored = [], [], []
1536 1537 mf2 = mfmatches(ctx2)
1537 1538
1538 1539 modified, added, clean = [], [], []
1539 1540 withflags = mf1.withflags() | mf2.withflags()
1540 1541 for fn, mf2node in mf2.iteritems():
1541 1542 if fn in mf1:
1542 1543 if (fn not in deleted and
1543 1544 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1544 1545 (mf1[fn] != mf2node and
1545 1546 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1546 1547 modified.append(fn)
1547 1548 elif listclean:
1548 1549 clean.append(fn)
1549 1550 del mf1[fn]
1550 1551 elif fn not in deleted:
1551 1552 added.append(fn)
1552 1553 removed = mf1.keys()
1553 1554
1554 1555 if working and modified and not self.dirstate._checklink:
1555 1556 # Symlink placeholders may get non-symlink-like contents
1556 1557 # via user error or dereferencing by NFS or Samba servers,
1557 1558 # so we filter out any placeholders that don't look like a
1558 1559 # symlink
1559 1560 sane = []
1560 1561 for f in modified:
1561 1562 if ctx2.flags(f) == 'l':
1562 1563 d = ctx2[f].data()
1563 1564 if len(d) >= 1024 or '\n' in d or util.binary(d):
1564 1565 self.ui.debug('ignoring suspect symlink placeholder'
1565 1566 ' "%s"\n' % f)
1566 1567 continue
1567 1568 sane.append(f)
1568 1569 modified = sane
1569 1570
1570 1571 r = modified, added, removed, deleted, unknown, ignored, clean
1571 1572
1572 1573 if listsubrepos:
1573 1574 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1574 1575 if working:
1575 1576 rev2 = None
1576 1577 else:
1577 1578 rev2 = ctx2.substate[subpath][1]
1578 1579 try:
1579 1580 submatch = matchmod.narrowmatcher(subpath, match)
1580 1581 s = sub.status(rev2, match=submatch, ignored=listignored,
1581 1582 clean=listclean, unknown=listunknown,
1582 1583 listsubrepos=True)
1583 1584 for rfiles, sfiles in zip(r, s):
1584 1585 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1585 1586 except error.LookupError:
1586 1587 self.ui.status(_("skipping missing subrepository: %s\n")
1587 1588 % subpath)
1588 1589
1589 1590 for l in r:
1590 1591 l.sort()
1591 1592 return r
1592 1593
1593 1594 def heads(self, start=None):
1594 1595 heads = self.changelog.heads(start)
1595 1596 # sort the output in rev descending order
1596 1597 return sorted(heads, key=self.changelog.rev, reverse=True)
1597 1598
1598 1599 def branchheads(self, branch=None, start=None, closed=False):
1599 1600 '''return a (possibly filtered) list of heads for the given branch
1600 1601
1601 1602 Heads are returned in topological order, from newest to oldest.
1602 1603 If branch is None, use the dirstate branch.
1603 1604 If start is not None, return only heads reachable from start.
1604 1605 If closed is True, return heads that are marked as closed as well.
1605 1606 '''
1606 1607 if branch is None:
1607 1608 branch = self[None].branch()
1608 1609 branches = self.branchmap()
1609 1610 if branch not in branches:
1610 1611 return []
1611 1612 # the cache returns heads ordered lowest to highest
1612 1613 bheads = list(reversed(branches[branch]))
1613 1614 if start is not None:
1614 1615 # filter out the heads that cannot be reached from startrev
1615 1616 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1616 1617 bheads = [h for h in bheads if h in fbheads]
1617 1618 if not closed:
1618 1619 bheads = [h for h in bheads if not self[h].closesbranch()]
1619 1620 return bheads
1620 1621
1621 1622 def branches(self, nodes):
1622 1623 if not nodes:
1623 1624 nodes = [self.changelog.tip()]
1624 1625 b = []
1625 1626 for n in nodes:
1626 1627 t = n
1627 1628 while True:
1628 1629 p = self.changelog.parents(n)
1629 1630 if p[1] != nullid or p[0] == nullid:
1630 1631 b.append((t, n, p[0], p[1]))
1631 1632 break
1632 1633 n = p[0]
1633 1634 return b
1634 1635
1635 1636 def between(self, pairs):
1636 1637 r = []
1637 1638
1638 1639 for top, bottom in pairs:
1639 1640 n, l, i = top, [], 0
1640 1641 f = 1
1641 1642
1642 1643 while n != bottom and n != nullid:
1643 1644 p = self.changelog.parents(n)[0]
1644 1645 if i == f:
1645 1646 l.append(n)
1646 1647 f = f * 2
1647 1648 n = p
1648 1649 i += 1
1649 1650
1650 1651 r.append(l)
1651 1652
1652 1653 return r
1653 1654
1654 1655 def pull(self, remote, heads=None, force=False):
1655 1656 # don't open transaction for nothing or you break future useful
1656 1657 # rollback call
1657 1658 tr = None
1658 1659 trname = 'pull\n' + util.hidepassword(remote.url())
1659 1660 lock = self.lock()
1660 1661 try:
1661 1662 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1662 1663 force=force)
1663 1664 common, fetch, rheads = tmp
1664 1665 if not fetch:
1665 1666 self.ui.status(_("no changes found\n"))
1666 1667 added = []
1667 1668 result = 0
1668 1669 else:
1669 1670 tr = self.transaction(trname)
1670 1671 if heads is None and list(common) == [nullid]:
1671 1672 self.ui.status(_("requesting all changes\n"))
1672 1673 elif heads is None and remote.capable('changegroupsubset'):
1673 1674 # issue1320, avoid a race if remote changed after discovery
1674 1675 heads = rheads
1675 1676
1676 1677 if remote.capable('getbundle'):
1678 # TODO: get bundlecaps from remote
1677 1679 cg = remote.getbundle('pull', common=common,
1678 1680 heads=heads or rheads)
1679 1681 elif heads is None:
1680 1682 cg = remote.changegroup(fetch, 'pull')
1681 1683 elif not remote.capable('changegroupsubset'):
1682 1684 raise util.Abort(_("partial pull cannot be done because "
1683 1685 "other repository doesn't support "
1684 1686 "changegroupsubset."))
1685 1687 else:
1686 1688 cg = remote.changegroupsubset(fetch, heads, 'pull')
1687 1689 # we use unfiltered changelog here because hidden revision must
1688 1690 # be taken in account for phase synchronization. They may
1689 1691 # becomes public and becomes visible again.
1690 1692 cl = self.unfiltered().changelog
1691 1693 clstart = len(cl)
1692 1694 result = self.addchangegroup(cg, 'pull', remote.url())
1693 1695 clend = len(cl)
1694 1696 added = [cl.node(r) for r in xrange(clstart, clend)]
1695 1697
1696 1698 # compute target subset
1697 1699 if heads is None:
1698 1700 # We pulled every thing possible
1699 1701 # sync on everything common
1700 1702 subset = common + added
1701 1703 else:
1702 1704 # We pulled a specific subset
1703 1705 # sync on this subset
1704 1706 subset = heads
1705 1707
1706 1708 # Get remote phases data from remote
1707 1709 remotephases = remote.listkeys('phases')
1708 1710 publishing = bool(remotephases.get('publishing', False))
1709 1711 if remotephases and not publishing:
1710 1712 # remote is new and unpublishing
1711 1713 pheads, _dr = phases.analyzeremotephases(self, subset,
1712 1714 remotephases)
1713 1715 phases.advanceboundary(self, phases.public, pheads)
1714 1716 phases.advanceboundary(self, phases.draft, subset)
1715 1717 else:
1716 1718 # Remote is old or publishing all common changesets
1717 1719 # should be seen as public
1718 1720 phases.advanceboundary(self, phases.public, subset)
1719 1721
1720 1722 def gettransaction():
1721 1723 if tr is None:
1722 1724 return self.transaction(trname)
1723 1725 return tr
1724 1726
1725 1727 obstr = obsolete.syncpull(self, remote, gettransaction)
1726 1728 if obstr is not None:
1727 1729 tr = obstr
1728 1730
1729 1731 if tr is not None:
1730 1732 tr.close()
1731 1733 finally:
1732 1734 if tr is not None:
1733 1735 tr.release()
1734 1736 lock.release()
1735 1737
1736 1738 return result
1737 1739
1738 1740 def checkpush(self, force, revs):
1739 1741 """Extensions can override this function if additional checks have
1740 1742 to be performed before pushing, or call it if they override push
1741 1743 command.
1742 1744 """
1743 1745 pass
1744 1746
1745 1747 def push(self, remote, force=False, revs=None, newbranch=False):
1746 1748 '''Push outgoing changesets (limited by revs) from the current
1747 1749 repository to remote. Return an integer:
1748 1750 - None means nothing to push
1749 1751 - 0 means HTTP error
1750 1752 - 1 means we pushed and remote head count is unchanged *or*
1751 1753 we have outgoing changesets but refused to push
1752 1754 - other values as described by addchangegroup()
1753 1755 '''
1754 1756 # there are two ways to push to remote repo:
1755 1757 #
1756 1758 # addchangegroup assumes local user can lock remote
1757 1759 # repo (local filesystem, old ssh servers).
1758 1760 #
1759 1761 # unbundle assumes local user cannot lock remote repo (new ssh
1760 1762 # servers, http servers).
1761 1763
1762 1764 if not remote.canpush():
1763 1765 raise util.Abort(_("destination does not support push"))
1764 1766 unfi = self.unfiltered()
1765 1767 def localphasemove(nodes, phase=phases.public):
1766 1768 """move <nodes> to <phase> in the local source repo"""
1767 1769 if locallock is not None:
1768 1770 phases.advanceboundary(self, phase, nodes)
1769 1771 else:
1770 1772 # repo is not locked, do not change any phases!
1771 1773 # Informs the user that phases should have been moved when
1772 1774 # applicable.
1773 1775 actualmoves = [n for n in nodes if phase < self[n].phase()]
1774 1776 phasestr = phases.phasenames[phase]
1775 1777 if actualmoves:
1776 1778 self.ui.status(_('cannot lock source repo, skipping local'
1777 1779 ' %s phase update\n') % phasestr)
1778 1780 # get local lock as we might write phase data
1779 1781 locallock = None
1780 1782 try:
1781 1783 locallock = self.lock()
1782 1784 except IOError, err:
1783 1785 if err.errno != errno.EACCES:
1784 1786 raise
1785 1787 # source repo cannot be locked.
1786 1788 # We do not abort the push, but just disable the local phase
1787 1789 # synchronisation.
1788 1790 msg = 'cannot lock source repository: %s\n' % err
1789 1791 self.ui.debug(msg)
1790 1792 try:
1791 1793 self.checkpush(force, revs)
1792 1794 lock = None
1793 1795 unbundle = remote.capable('unbundle')
1794 1796 if not unbundle:
1795 1797 lock = remote.lock()
1796 1798 try:
1797 1799 # discovery
1798 1800 fci = discovery.findcommonincoming
1799 1801 commoninc = fci(unfi, remote, force=force)
1800 1802 common, inc, remoteheads = commoninc
1801 1803 fco = discovery.findcommonoutgoing
1802 1804 outgoing = fco(unfi, remote, onlyheads=revs,
1803 1805 commoninc=commoninc, force=force)
1804 1806
1805 1807
1806 1808 if not outgoing.missing:
1807 1809 # nothing to push
1808 1810 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1809 1811 ret = None
1810 1812 else:
1811 1813 # something to push
1812 1814 if not force:
1813 1815 # if self.obsstore == False --> no obsolete
1814 1816 # then, save the iteration
1815 1817 if unfi.obsstore:
1816 1818 # this message are here for 80 char limit reason
1817 1819 mso = _("push includes obsolete changeset: %s!")
1818 1820 mst = "push includes %s changeset: %s!"
1819 1821 # plain versions for i18n tool to detect them
1820 1822 _("push includes unstable changeset: %s!")
1821 1823 _("push includes bumped changeset: %s!")
1822 1824 _("push includes divergent changeset: %s!")
1823 1825 # If we are to push if there is at least one
1824 1826 # obsolete or unstable changeset in missing, at
1825 1827 # least one of the missinghead will be obsolete or
1826 1828 # unstable. So checking heads only is ok
1827 1829 for node in outgoing.missingheads:
1828 1830 ctx = unfi[node]
1829 1831 if ctx.obsolete():
1830 1832 raise util.Abort(mso % ctx)
1831 1833 elif ctx.troubled():
1832 1834 raise util.Abort(_(mst)
1833 1835 % (ctx.troubles()[0],
1834 1836 ctx))
1835 1837 discovery.checkheads(unfi, remote, outgoing,
1836 1838 remoteheads, newbranch,
1837 1839 bool(inc))
1838 1840
1841 # TODO: get bundlecaps from remote
1842 bundlecaps = None
1839 1843 # create a changegroup from local
1840 1844 if revs is None and not outgoing.excluded:
1841 1845 # push everything,
1842 1846 # use the fast path, no race possible on push
1843 bundler = changegroup.bundle10()
1847 bundler = changegroup.bundle10(bundlecaps)
1844 1848 cg = self._changegroup(outgoing.missing, bundler,
1845 1849 'push')
1846 1850 else:
1847 cg = self.getlocalbundle('push', outgoing)
1851 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1848 1852
1849 1853 # apply changegroup to remote
1850 1854 if unbundle:
1851 1855 # local repo finds heads on server, finds out what
1852 1856 # revs it must push. once revs transferred, if server
1853 1857 # finds it has different heads (someone else won
1854 1858 # commit/push race), server aborts.
1855 1859 if force:
1856 1860 remoteheads = ['force']
1857 1861 # ssh: return remote's addchangegroup()
1858 1862 # http: return remote's addchangegroup() or 0 for error
1859 1863 ret = remote.unbundle(cg, remoteheads, 'push')
1860 1864 else:
1861 1865 # we return an integer indicating remote head count
1862 1866 # change
1863 1867 ret = remote.addchangegroup(cg, 'push', self.url())
1864 1868
1865 1869 if ret:
1866 1870 # push succeed, synchronize target of the push
1867 1871 cheads = outgoing.missingheads
1868 1872 elif revs is None:
1869 1873 # All out push fails. synchronize all common
1870 1874 cheads = outgoing.commonheads
1871 1875 else:
1872 1876 # I want cheads = heads(::missingheads and ::commonheads)
1873 1877 # (missingheads is revs with secret changeset filtered out)
1874 1878 #
1875 1879 # This can be expressed as:
1876 1880 # cheads = ( (missingheads and ::commonheads)
1877 1881 # + (commonheads and ::missingheads))"
1878 1882 # )
1879 1883 #
1880 1884 # while trying to push we already computed the following:
1881 1885 # common = (::commonheads)
1882 1886 # missing = ((commonheads::missingheads) - commonheads)
1883 1887 #
1884 1888 # We can pick:
1885 1889 # * missingheads part of common (::commonheads)
1886 1890 common = set(outgoing.common)
1887 1891 cheads = [node for node in revs if node in common]
1888 1892 # and
1889 1893 # * commonheads parents on missing
1890 1894 revset = unfi.set('%ln and parents(roots(%ln))',
1891 1895 outgoing.commonheads,
1892 1896 outgoing.missing)
1893 1897 cheads.extend(c.node() for c in revset)
1894 1898 # even when we don't push, exchanging phase data is useful
1895 1899 remotephases = remote.listkeys('phases')
1896 1900 if (self.ui.configbool('ui', '_usedassubrepo', False)
1897 1901 and remotephases # server supports phases
1898 1902 and ret is None # nothing was pushed
1899 1903 and remotephases.get('publishing', False)):
1900 1904 # When:
1901 1905 # - this is a subrepo push
1902 1906 # - and remote support phase
1903 1907 # - and no changeset was pushed
1904 1908 # - and remote is publishing
1905 1909 # We may be in issue 3871 case!
1906 1910 # We drop the possible phase synchronisation done by
1907 1911 # courtesy to publish changesets possibly locally draft
1908 1912 # on the remote.
1909 1913 remotephases = {'publishing': 'True'}
1910 1914 if not remotephases: # old server or public only repo
1911 1915 localphasemove(cheads)
1912 1916 # don't push any phase data as there is nothing to push
1913 1917 else:
1914 1918 ana = phases.analyzeremotephases(self, cheads, remotephases)
1915 1919 pheads, droots = ana
1916 1920 ### Apply remote phase on local
1917 1921 if remotephases.get('publishing', False):
1918 1922 localphasemove(cheads)
1919 1923 else: # publish = False
1920 1924 localphasemove(pheads)
1921 1925 localphasemove(cheads, phases.draft)
1922 1926 ### Apply local phase on remote
1923 1927
1924 1928 # Get the list of all revs draft on remote by public here.
1925 1929 # XXX Beware that revset break if droots is not strictly
1926 1930 # XXX root we may want to ensure it is but it is costly
1927 1931 outdated = unfi.set('heads((%ln::%ln) and public())',
1928 1932 droots, cheads)
1929 1933 for newremotehead in outdated:
1930 1934 r = remote.pushkey('phases',
1931 1935 newremotehead.hex(),
1932 1936 str(phases.draft),
1933 1937 str(phases.public))
1934 1938 if not r:
1935 1939 self.ui.warn(_('updating %s to public failed!\n')
1936 1940 % newremotehead)
1937 1941 self.ui.debug('try to push obsolete markers to remote\n')
1938 1942 obsolete.syncpush(self, remote)
1939 1943 finally:
1940 1944 if lock is not None:
1941 1945 lock.release()
1942 1946 finally:
1943 1947 if locallock is not None:
1944 1948 locallock.release()
1945 1949
1946 1950 self.ui.debug("checking for updated bookmarks\n")
1947 1951 rb = remote.listkeys('bookmarks')
1948 1952 for k in rb.keys():
1949 1953 if k in unfi._bookmarks:
1950 1954 nr, nl = rb[k], hex(self._bookmarks[k])
1951 1955 if nr in unfi:
1952 1956 cr = unfi[nr]
1953 1957 cl = unfi[nl]
1954 1958 if bookmarks.validdest(unfi, cr, cl):
1955 1959 r = remote.pushkey('bookmarks', k, nr, nl)
1956 1960 if r:
1957 1961 self.ui.status(_("updating bookmark %s\n") % k)
1958 1962 else:
1959 1963 self.ui.warn(_('updating bookmark %s'
1960 1964 ' failed!\n') % k)
1961 1965
1962 1966 return ret
1963 1967
1964 1968 def changegroupinfo(self, nodes, source):
1965 1969 if self.ui.verbose or source == 'bundle':
1966 1970 self.ui.status(_("%d changesets found\n") % len(nodes))
1967 1971 if self.ui.debugflag:
1968 1972 self.ui.debug("list of changesets:\n")
1969 1973 for node in nodes:
1970 1974 self.ui.debug("%s\n" % hex(node))
1971 1975
1972 1976 def changegroupsubset(self, bases, heads, source):
1973 1977 """Compute a changegroup consisting of all the nodes that are
1974 1978 descendants of any of the bases and ancestors of any of the heads.
1975 1979 Return a chunkbuffer object whose read() method will return
1976 1980 successive changegroup chunks.
1977 1981
1978 1982 It is fairly complex as determining which filenodes and which
1979 1983 manifest nodes need to be included for the changeset to be complete
1980 1984 is non-trivial.
1981 1985
1982 1986 Another wrinkle is doing the reverse, figuring out which changeset in
1983 1987 the changegroup a particular filenode or manifestnode belongs to.
1984 1988 """
1985 1989 cl = self.changelog
1986 1990 if not bases:
1987 1991 bases = [nullid]
1988 1992 csets, bases, heads = cl.nodesbetween(bases, heads)
1989 1993 # We assume that all ancestors of bases are known
1990 1994 common = cl.ancestors([cl.rev(n) for n in bases])
1991 1995 bundler = changegroup.bundle10()
1992 1996 return self._changegroupsubset(common, csets, heads, bundler, source)
1993 1997
1994 def getlocalbundle(self, source, outgoing):
1998 def getlocalbundle(self, source, outgoing, bundlecaps=None):
1995 1999 """Like getbundle, but taking a discovery.outgoing as an argument.
1996 2000
1997 2001 This is only implemented for local repos and reuses potentially
1998 2002 precomputed sets in outgoing."""
1999 2003 if not outgoing.missing:
2000 2004 return None
2001 bundler = changegroup.bundle10()
2005 bundler = changegroup.bundle10(bundlecaps)
2002 2006 return self._changegroupsubset(outgoing.common,
2003 2007 outgoing.missing,
2004 2008 outgoing.missingheads,
2005 2009 bundler,
2006 2010 source)
2007 2011
2008 def getbundle(self, source, heads=None, common=None):
2012 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2009 2013 """Like changegroupsubset, but returns the set difference between the
2010 2014 ancestors of heads and the ancestors common.
2011 2015
2012 2016 If heads is None, use the local heads. If common is None, use [nullid].
2013 2017
2014 2018 The nodes in common might not all be known locally due to the way the
2015 2019 current discovery protocol works.
2016 2020 """
2017 2021 cl = self.changelog
2018 2022 if common:
2019 2023 hasnode = cl.hasnode
2020 2024 common = [n for n in common if hasnode(n)]
2021 2025 else:
2022 2026 common = [nullid]
2023 2027 if not heads:
2024 2028 heads = cl.heads()
2025 2029 return self.getlocalbundle(source,
2026 discovery.outgoing(cl, common, heads))
2030 discovery.outgoing(cl, common, heads),
2031 bundlecaps=bundlecaps)
2027 2032
2028 2033 @unfilteredmethod
2029 2034 def _changegroupsubset(self, commonrevs, csets, heads, bundler, source):
2030 2035
2031 2036 cl = self.changelog
2032 2037 mf = self.manifest
2033 2038 mfs = {} # needed manifests
2034 2039 fnodes = {} # needed file nodes
2035 2040 changedfiles = set()
2036 2041 fstate = ['', {}]
2037 2042 count = [0, 0]
2038 2043
2039 2044 # can we go through the fast path ?
2040 2045 heads.sort()
2041 2046 if heads == sorted(self.heads()):
2042 2047 return self._changegroup(csets, bundler, source)
2043 2048
2044 2049 # slow path
2045 2050 self.hook('preoutgoing', throw=True, source=source)
2046 2051 self.changegroupinfo(csets, source)
2047 2052
2048 2053 # filter any nodes that claim to be part of the known set
2049 2054 def prune(revlog, missing):
2050 2055 rr, rl = revlog.rev, revlog.linkrev
2051 2056 return [n for n in missing
2052 2057 if rl(rr(n)) not in commonrevs]
2053 2058
2054 2059 progress = self.ui.progress
2055 2060 _bundling = _('bundling')
2056 2061 _changesets = _('changesets')
2057 2062 _manifests = _('manifests')
2058 2063 _files = _('files')
2059 2064
2060 2065 def lookup(revlog, x):
2061 2066 if revlog == cl:
2062 2067 c = cl.read(x)
2063 2068 changedfiles.update(c[3])
2064 2069 mfs.setdefault(c[0], x)
2065 2070 count[0] += 1
2066 2071 progress(_bundling, count[0],
2067 2072 unit=_changesets, total=count[1])
2068 2073 return x
2069 2074 elif revlog == mf:
2070 2075 clnode = mfs[x]
2071 2076 mdata = mf.readfast(x)
2072 2077 for f, n in mdata.iteritems():
2073 2078 if f in changedfiles:
2074 2079 fnodes[f].setdefault(n, clnode)
2075 2080 count[0] += 1
2076 2081 progress(_bundling, count[0],
2077 2082 unit=_manifests, total=count[1])
2078 2083 return clnode
2079 2084 else:
2080 2085 progress(_bundling, count[0], item=fstate[0],
2081 2086 unit=_files, total=count[1])
2082 2087 return fstate[1][x]
2083 2088
2084 2089 bundler.start(lookup)
2085 2090 reorder = self.ui.config('bundle', 'reorder', 'auto')
2086 2091 if reorder == 'auto':
2087 2092 reorder = None
2088 2093 else:
2089 2094 reorder = util.parsebool(reorder)
2090 2095
2091 2096 def gengroup():
2092 2097 # Create a changenode group generator that will call our functions
2093 2098 # back to lookup the owning changenode and collect information.
2094 2099 count[:] = [0, len(csets)]
2095 2100 for chunk in bundler.group(csets, cl, reorder=reorder):
2096 2101 yield chunk
2097 2102 progress(_bundling, None)
2098 2103
2099 2104 # Create a generator for the manifestnodes that calls our lookup
2100 2105 # and data collection functions back.
2101 2106 for f in changedfiles:
2102 2107 fnodes[f] = {}
2103 2108 count[:] = [0, len(mfs)]
2104 2109 for chunk in bundler.group(prune(mf, mfs), mf, reorder=reorder):
2105 2110 yield chunk
2106 2111 progress(_bundling, None)
2107 2112
2108 2113 mfs.clear()
2109 2114
2110 2115 # Go through all our files in order sorted by name.
2111 2116 count[:] = [0, len(changedfiles)]
2112 2117 for fname in sorted(changedfiles):
2113 2118 filerevlog = self.file(fname)
2114 2119 if not len(filerevlog):
2115 2120 raise util.Abort(_("empty or missing revlog for %s")
2116 2121 % fname)
2117 2122 fstate[0] = fname
2118 2123 fstate[1] = fnodes.pop(fname, {})
2119 2124
2120 2125 nodelist = prune(filerevlog, fstate[1])
2121 2126 if nodelist:
2122 2127 count[0] += 1
2123 2128 yield bundler.fileheader(fname)
2124 2129 for chunk in bundler.group(nodelist, filerevlog, reorder):
2125 2130 yield chunk
2126 2131
2127 2132 # Signal that no more groups are left.
2128 2133 yield bundler.close()
2129 2134 progress(_bundling, None)
2130 2135
2131 2136 if csets:
2132 2137 self.hook('outgoing', node=hex(csets[0]), source=source)
2133 2138
2134 2139 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2135 2140
2136 2141 def changegroup(self, basenodes, source):
2137 2142 # to avoid a race we use changegroupsubset() (issue1320)
2138 2143 return self.changegroupsubset(basenodes, self.heads(), source)
2139 2144
2140 2145 @unfilteredmethod
2141 2146 def _changegroup(self, nodes, bundler, source):
2142 2147 """Compute the changegroup of all nodes that we have that a recipient
2143 2148 doesn't. Return a chunkbuffer object whose read() method will return
2144 2149 successive changegroup chunks.
2145 2150
2146 2151 This is much easier than the previous function as we can assume that
2147 2152 the recipient has any changenode we aren't sending them.
2148 2153
2149 2154 nodes is the set of nodes to send"""
2150 2155
2151 2156 cl = self.changelog
2152 2157 mf = self.manifest
2153 2158 mfs = {}
2154 2159 changedfiles = set()
2155 2160 fstate = ['']
2156 2161 count = [0, 0]
2157 2162
2158 2163 self.hook('preoutgoing', throw=True, source=source)
2159 2164 self.changegroupinfo(nodes, source)
2160 2165
2161 2166 revset = set([cl.rev(n) for n in nodes])
2162 2167
2163 2168 def gennodelst(log):
2164 2169 ln, llr = log.node, log.linkrev
2165 2170 return [ln(r) for r in log if llr(r) in revset]
2166 2171
2167 2172 progress = self.ui.progress
2168 2173 _bundling = _('bundling')
2169 2174 _changesets = _('changesets')
2170 2175 _manifests = _('manifests')
2171 2176 _files = _('files')
2172 2177
2173 2178 def lookup(revlog, x):
2174 2179 if revlog == cl:
2175 2180 c = cl.read(x)
2176 2181 changedfiles.update(c[3])
2177 2182 mfs.setdefault(c[0], x)
2178 2183 count[0] += 1
2179 2184 progress(_bundling, count[0],
2180 2185 unit=_changesets, total=count[1])
2181 2186 return x
2182 2187 elif revlog == mf:
2183 2188 count[0] += 1
2184 2189 progress(_bundling, count[0],
2185 2190 unit=_manifests, total=count[1])
2186 2191 return cl.node(revlog.linkrev(revlog.rev(x)))
2187 2192 else:
2188 2193 progress(_bundling, count[0], item=fstate[0],
2189 2194 total=count[1], unit=_files)
2190 2195 return cl.node(revlog.linkrev(revlog.rev(x)))
2191 2196
2192 2197 bundler.start(lookup)
2193 2198 reorder = self.ui.config('bundle', 'reorder', 'auto')
2194 2199 if reorder == 'auto':
2195 2200 reorder = None
2196 2201 else:
2197 2202 reorder = util.parsebool(reorder)
2198 2203
2199 2204 def gengroup():
2200 2205 '''yield a sequence of changegroup chunks (strings)'''
2201 2206 # construct a list of all changed files
2202 2207
2203 2208 count[:] = [0, len(nodes)]
2204 2209 for chunk in bundler.group(nodes, cl, reorder=reorder):
2205 2210 yield chunk
2206 2211 progress(_bundling, None)
2207 2212
2208 2213 count[:] = [0, len(mfs)]
2209 2214 for chunk in bundler.group(gennodelst(mf), mf, reorder=reorder):
2210 2215 yield chunk
2211 2216 progress(_bundling, None)
2212 2217
2213 2218 count[:] = [0, len(changedfiles)]
2214 2219 for fname in sorted(changedfiles):
2215 2220 filerevlog = self.file(fname)
2216 2221 if not len(filerevlog):
2217 2222 raise util.Abort(_("empty or missing revlog for %s")
2218 2223 % fname)
2219 2224 fstate[0] = fname
2220 2225 nodelist = gennodelst(filerevlog)
2221 2226 if nodelist:
2222 2227 count[0] += 1
2223 2228 yield bundler.fileheader(fname)
2224 2229 for chunk in bundler.group(nodelist, filerevlog, reorder):
2225 2230 yield chunk
2226 2231 yield bundler.close()
2227 2232 progress(_bundling, None)
2228 2233
2229 2234 if nodes:
2230 2235 self.hook('outgoing', node=hex(nodes[0]), source=source)
2231 2236
2232 2237 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2233 2238
2234 2239 @unfilteredmethod
2235 2240 def addchangegroup(self, source, srctype, url, emptyok=False):
2236 2241 """Add the changegroup returned by source.read() to this repo.
2237 2242 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2238 2243 the URL of the repo where this changegroup is coming from.
2239 2244
2240 2245 Return an integer summarizing the change to this repo:
2241 2246 - nothing changed or no source: 0
2242 2247 - more heads than before: 1+added heads (2..n)
2243 2248 - fewer heads than before: -1-removed heads (-2..-n)
2244 2249 - number of heads stays the same: 1
2245 2250 """
2246 2251 def csmap(x):
2247 2252 self.ui.debug("add changeset %s\n" % short(x))
2248 2253 return len(cl)
2249 2254
2250 2255 def revmap(x):
2251 2256 return cl.rev(x)
2252 2257
2253 2258 if not source:
2254 2259 return 0
2255 2260
2256 2261 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2257 2262
2258 2263 changesets = files = revisions = 0
2259 2264 efiles = set()
2260 2265
2261 2266 # write changelog data to temp files so concurrent readers will not see
2262 2267 # inconsistent view
2263 2268 cl = self.changelog
2264 2269 cl.delayupdate()
2265 2270 oldheads = cl.heads()
2266 2271
2267 2272 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2268 2273 try:
2269 2274 trp = weakref.proxy(tr)
2270 2275 # pull off the changeset group
2271 2276 self.ui.status(_("adding changesets\n"))
2272 2277 clstart = len(cl)
2273 2278 class prog(object):
2274 2279 step = _('changesets')
2275 2280 count = 1
2276 2281 ui = self.ui
2277 2282 total = None
2278 2283 def __call__(self):
2279 2284 self.ui.progress(self.step, self.count, unit=_('chunks'),
2280 2285 total=self.total)
2281 2286 self.count += 1
2282 2287 pr = prog()
2283 2288 source.callback = pr
2284 2289
2285 2290 source.changelogheader()
2286 2291 srccontent = cl.addgroup(source, csmap, trp)
2287 2292 if not (srccontent or emptyok):
2288 2293 raise util.Abort(_("received changelog group is empty"))
2289 2294 clend = len(cl)
2290 2295 changesets = clend - clstart
2291 2296 for c in xrange(clstart, clend):
2292 2297 efiles.update(self[c].files())
2293 2298 efiles = len(efiles)
2294 2299 self.ui.progress(_('changesets'), None)
2295 2300
2296 2301 # pull off the manifest group
2297 2302 self.ui.status(_("adding manifests\n"))
2298 2303 pr.step = _('manifests')
2299 2304 pr.count = 1
2300 2305 pr.total = changesets # manifests <= changesets
2301 2306 # no need to check for empty manifest group here:
2302 2307 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2303 2308 # no new manifest will be created and the manifest group will
2304 2309 # be empty during the pull
2305 2310 source.manifestheader()
2306 2311 self.manifest.addgroup(source, revmap, trp)
2307 2312 self.ui.progress(_('manifests'), None)
2308 2313
2309 2314 needfiles = {}
2310 2315 if self.ui.configbool('server', 'validate', default=False):
2311 2316 # validate incoming csets have their manifests
2312 2317 for cset in xrange(clstart, clend):
2313 2318 mfest = self.changelog.read(self.changelog.node(cset))[0]
2314 2319 mfest = self.manifest.readdelta(mfest)
2315 2320 # store file nodes we must see
2316 2321 for f, n in mfest.iteritems():
2317 2322 needfiles.setdefault(f, set()).add(n)
2318 2323
2319 2324 # process the files
2320 2325 self.ui.status(_("adding file changes\n"))
2321 2326 pr.step = _('files')
2322 2327 pr.count = 1
2323 2328 pr.total = efiles
2324 2329 source.callback = None
2325 2330
2326 2331 while True:
2327 2332 chunkdata = source.filelogheader()
2328 2333 if not chunkdata:
2329 2334 break
2330 2335 f = chunkdata["filename"]
2331 2336 self.ui.debug("adding %s revisions\n" % f)
2332 2337 pr()
2333 2338 fl = self.file(f)
2334 2339 o = len(fl)
2335 2340 if not fl.addgroup(source, revmap, trp):
2336 2341 raise util.Abort(_("received file revlog group is empty"))
2337 2342 revisions += len(fl) - o
2338 2343 files += 1
2339 2344 if f in needfiles:
2340 2345 needs = needfiles[f]
2341 2346 for new in xrange(o, len(fl)):
2342 2347 n = fl.node(new)
2343 2348 if n in needs:
2344 2349 needs.remove(n)
2345 2350 else:
2346 2351 raise util.Abort(
2347 2352 _("received spurious file revlog entry"))
2348 2353 if not needs:
2349 2354 del needfiles[f]
2350 2355 self.ui.progress(_('files'), None)
2351 2356
2352 2357 for f, needs in needfiles.iteritems():
2353 2358 fl = self.file(f)
2354 2359 for n in needs:
2355 2360 try:
2356 2361 fl.rev(n)
2357 2362 except error.LookupError:
2358 2363 raise util.Abort(
2359 2364 _('missing file data for %s:%s - run hg verify') %
2360 2365 (f, hex(n)))
2361 2366
2362 2367 dh = 0
2363 2368 if oldheads:
2364 2369 heads = cl.heads()
2365 2370 dh = len(heads) - len(oldheads)
2366 2371 for h in heads:
2367 2372 if h not in oldheads and self[h].closesbranch():
2368 2373 dh -= 1
2369 2374 htext = ""
2370 2375 if dh:
2371 2376 htext = _(" (%+d heads)") % dh
2372 2377
2373 2378 self.ui.status(_("added %d changesets"
2374 2379 " with %d changes to %d files%s\n")
2375 2380 % (changesets, revisions, files, htext))
2376 2381 self.invalidatevolatilesets()
2377 2382
2378 2383 if changesets > 0:
2379 2384 p = lambda: cl.writepending() and self.root or ""
2380 2385 self.hook('pretxnchangegroup', throw=True,
2381 2386 node=hex(cl.node(clstart)), source=srctype,
2382 2387 url=url, pending=p)
2383 2388
2384 2389 added = [cl.node(r) for r in xrange(clstart, clend)]
2385 2390 publishing = self.ui.configbool('phases', 'publish', True)
2386 2391 if srctype == 'push':
2387 2392 # Old server can not push the boundary themself.
2388 2393 # New server won't push the boundary if changeset already
2389 2394 # existed locally as secrete
2390 2395 #
2391 2396 # We should not use added here but the list of all change in
2392 2397 # the bundle
2393 2398 if publishing:
2394 2399 phases.advanceboundary(self, phases.public, srccontent)
2395 2400 else:
2396 2401 phases.advanceboundary(self, phases.draft, srccontent)
2397 2402 phases.retractboundary(self, phases.draft, added)
2398 2403 elif srctype != 'strip':
2399 2404 # publishing only alter behavior during push
2400 2405 #
2401 2406 # strip should not touch boundary at all
2402 2407 phases.retractboundary(self, phases.draft, added)
2403 2408
2404 2409 # make changelog see real files again
2405 2410 cl.finalize(trp)
2406 2411
2407 2412 tr.close()
2408 2413
2409 2414 if changesets > 0:
2410 2415 if srctype != 'strip':
2411 2416 # During strip, branchcache is invalid but coming call to
2412 2417 # `destroyed` will repair it.
2413 2418 # In other case we can safely update cache on disk.
2414 2419 branchmap.updatecache(self.filtered('served'))
2415 2420 def runhooks():
2416 2421 # forcefully update the on-disk branch cache
2417 2422 self.ui.debug("updating the branch cache\n")
2418 2423 self.hook("changegroup", node=hex(cl.node(clstart)),
2419 2424 source=srctype, url=url)
2420 2425
2421 2426 for n in added:
2422 2427 self.hook("incoming", node=hex(n), source=srctype,
2423 2428 url=url)
2424 2429
2425 2430 newheads = [h for h in self.heads() if h not in oldheads]
2426 2431 self.ui.log("incoming",
2427 2432 "%s incoming changes - new heads: %s\n",
2428 2433 len(added),
2429 2434 ', '.join([hex(c[:6]) for c in newheads]))
2430 2435 self._afterlock(runhooks)
2431 2436
2432 2437 finally:
2433 2438 tr.release()
2434 2439 # never return 0 here:
2435 2440 if dh < 0:
2436 2441 return dh - 1
2437 2442 else:
2438 2443 return dh + 1
2439 2444
2440 2445 def stream_in(self, remote, requirements):
2441 2446 lock = self.lock()
2442 2447 try:
2443 2448 # Save remote branchmap. We will use it later
2444 2449 # to speed up branchcache creation
2445 2450 rbranchmap = None
2446 2451 if remote.capable("branchmap"):
2447 2452 rbranchmap = remote.branchmap()
2448 2453
2449 2454 fp = remote.stream_out()
2450 2455 l = fp.readline()
2451 2456 try:
2452 2457 resp = int(l)
2453 2458 except ValueError:
2454 2459 raise error.ResponseError(
2455 2460 _('unexpected response from remote server:'), l)
2456 2461 if resp == 1:
2457 2462 raise util.Abort(_('operation forbidden by server'))
2458 2463 elif resp == 2:
2459 2464 raise util.Abort(_('locking the remote repository failed'))
2460 2465 elif resp != 0:
2461 2466 raise util.Abort(_('the server sent an unknown error code'))
2462 2467 self.ui.status(_('streaming all changes\n'))
2463 2468 l = fp.readline()
2464 2469 try:
2465 2470 total_files, total_bytes = map(int, l.split(' ', 1))
2466 2471 except (ValueError, TypeError):
2467 2472 raise error.ResponseError(
2468 2473 _('unexpected response from remote server:'), l)
2469 2474 self.ui.status(_('%d files to transfer, %s of data\n') %
2470 2475 (total_files, util.bytecount(total_bytes)))
2471 2476 handled_bytes = 0
2472 2477 self.ui.progress(_('clone'), 0, total=total_bytes)
2473 2478 start = time.time()
2474 2479 for i in xrange(total_files):
2475 2480 # XXX doesn't support '\n' or '\r' in filenames
2476 2481 l = fp.readline()
2477 2482 try:
2478 2483 name, size = l.split('\0', 1)
2479 2484 size = int(size)
2480 2485 except (ValueError, TypeError):
2481 2486 raise error.ResponseError(
2482 2487 _('unexpected response from remote server:'), l)
2483 2488 if self.ui.debugflag:
2484 2489 self.ui.debug('adding %s (%s)\n' %
2485 2490 (name, util.bytecount(size)))
2486 2491 # for backwards compat, name was partially encoded
2487 2492 ofp = self.sopener(store.decodedir(name), 'w')
2488 2493 for chunk in util.filechunkiter(fp, limit=size):
2489 2494 handled_bytes += len(chunk)
2490 2495 self.ui.progress(_('clone'), handled_bytes,
2491 2496 total=total_bytes)
2492 2497 ofp.write(chunk)
2493 2498 ofp.close()
2494 2499 elapsed = time.time() - start
2495 2500 if elapsed <= 0:
2496 2501 elapsed = 0.001
2497 2502 self.ui.progress(_('clone'), None)
2498 2503 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2499 2504 (util.bytecount(total_bytes), elapsed,
2500 2505 util.bytecount(total_bytes / elapsed)))
2501 2506
2502 2507 # new requirements = old non-format requirements +
2503 2508 # new format-related
2504 2509 # requirements from the streamed-in repository
2505 2510 requirements.update(set(self.requirements) - self.supportedformats)
2506 2511 self._applyrequirements(requirements)
2507 2512 self._writerequirements()
2508 2513
2509 2514 if rbranchmap:
2510 2515 rbheads = []
2511 2516 for bheads in rbranchmap.itervalues():
2512 2517 rbheads.extend(bheads)
2513 2518
2514 2519 if rbheads:
2515 2520 rtiprev = max((int(self.changelog.rev(node))
2516 2521 for node in rbheads))
2517 2522 cache = branchmap.branchcache(rbranchmap,
2518 2523 self[rtiprev].node(),
2519 2524 rtiprev)
2520 2525 # Try to stick it as low as possible
2521 2526 # filter above served are unlikely to be fetch from a clone
2522 2527 for candidate in ('base', 'immutable', 'served'):
2523 2528 rview = self.filtered(candidate)
2524 2529 if cache.validfor(rview):
2525 2530 self._branchcaches[candidate] = cache
2526 2531 cache.write(rview)
2527 2532 break
2528 2533 self.invalidate()
2529 2534 return len(self.heads()) + 1
2530 2535 finally:
2531 2536 lock.release()
2532 2537
2533 2538 def clone(self, remote, heads=[], stream=False):
2534 2539 '''clone remote repository.
2535 2540
2536 2541 keyword arguments:
2537 2542 heads: list of revs to clone (forces use of pull)
2538 2543 stream: use streaming clone if possible'''
2539 2544
2540 2545 # now, all clients that can request uncompressed clones can
2541 2546 # read repo formats supported by all servers that can serve
2542 2547 # them.
2543 2548
2544 2549 # if revlog format changes, client will have to check version
2545 2550 # and format flags on "stream" capability, and use
2546 2551 # uncompressed only if compatible.
2547 2552
2548 2553 if not stream:
2549 2554 # if the server explicitly prefers to stream (for fast LANs)
2550 2555 stream = remote.capable('stream-preferred')
2551 2556
2552 2557 if stream and not heads:
2553 2558 # 'stream' means remote revlog format is revlogv1 only
2554 2559 if remote.capable('stream'):
2555 2560 return self.stream_in(remote, set(('revlogv1',)))
2556 2561 # otherwise, 'streamreqs' contains the remote revlog format
2557 2562 streamreqs = remote.capable('streamreqs')
2558 2563 if streamreqs:
2559 2564 streamreqs = set(streamreqs.split(','))
2560 2565 # if we support it, stream in and adjust our requirements
2561 2566 if not streamreqs - self.supportedformats:
2562 2567 return self.stream_in(remote, streamreqs)
2563 2568 return self.pull(remote, heads)
2564 2569
2565 2570 def pushkey(self, namespace, key, old, new):
2566 2571 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2567 2572 old=old, new=new)
2568 2573 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2569 2574 ret = pushkey.push(self, namespace, key, old, new)
2570 2575 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2571 2576 ret=ret)
2572 2577 return ret
2573 2578
2574 2579 def listkeys(self, namespace):
2575 2580 self.hook('prelistkeys', throw=True, namespace=namespace)
2576 2581 self.ui.debug('listing keys for "%s"\n' % namespace)
2577 2582 values = pushkey.list(self, namespace)
2578 2583 self.hook('listkeys', namespace=namespace, values=values)
2579 2584 return values
2580 2585
2581 2586 def debugwireargs(self, one, two, three=None, four=None, five=None):
2582 2587 '''used to test argument passing over the wire'''
2583 2588 return "%s %s %s %s %s" % (one, two, three, four, five)
2584 2589
2585 2590 def savecommitmessage(self, text):
2586 2591 fp = self.opener('last-message.txt', 'wb')
2587 2592 try:
2588 2593 fp.write(text)
2589 2594 finally:
2590 2595 fp.close()
2591 2596 return self.pathto(fp.name[len(self.root) + 1:])
2592 2597
2593 2598 # used to avoid circular references so destructors work
2594 2599 def aftertrans(files):
2595 2600 renamefiles = [tuple(t) for t in files]
2596 2601 def a():
2597 2602 for vfs, src, dest in renamefiles:
2598 2603 try:
2599 2604 vfs.rename(src, dest)
2600 2605 except OSError: # journal file does not yet exist
2601 2606 pass
2602 2607 return a
2603 2608
2604 2609 def undoname(fn):
2605 2610 base, name = os.path.split(fn)
2606 2611 assert name.startswith('journal')
2607 2612 return os.path.join(base, name.replace('journal', 'undo', 1))
2608 2613
2609 2614 def instance(ui, path, create):
2610 2615 return localrepository(ui, util.urllocalpath(path), create)
2611 2616
2612 2617 def islocal(path):
2613 2618 return True
@@ -1,660 +1,665 b''
1 1 # wireproto.py - generic wire protocol support functions
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import urllib, tempfile, os, sys
9 9 from i18n import _
10 10 from node import bin, hex
11 11 import changegroup as changegroupmod
12 12 import peer, error, encoding, util, store
13 13
14 14 # abstract batching support
15 15
16 16 class future(object):
17 17 '''placeholder for a value to be set later'''
18 18 def set(self, value):
19 19 if util.safehasattr(self, 'value'):
20 20 raise error.RepoError("future is already set")
21 21 self.value = value
22 22
23 23 class batcher(object):
24 24 '''base class for batches of commands submittable in a single request
25 25
26 26 All methods invoked on instances of this class are simply queued and
27 27 return a a future for the result. Once you call submit(), all the queued
28 28 calls are performed and the results set in their respective futures.
29 29 '''
30 30 def __init__(self):
31 31 self.calls = []
32 32 def __getattr__(self, name):
33 33 def call(*args, **opts):
34 34 resref = future()
35 35 self.calls.append((name, args, opts, resref,))
36 36 return resref
37 37 return call
38 38 def submit(self):
39 39 pass
40 40
41 41 class localbatch(batcher):
42 42 '''performs the queued calls directly'''
43 43 def __init__(self, local):
44 44 batcher.__init__(self)
45 45 self.local = local
46 46 def submit(self):
47 47 for name, args, opts, resref in self.calls:
48 48 resref.set(getattr(self.local, name)(*args, **opts))
49 49
50 50 class remotebatch(batcher):
51 51 '''batches the queued calls; uses as few roundtrips as possible'''
52 52 def __init__(self, remote):
53 53 '''remote must support _submitbatch(encbatch) and
54 54 _submitone(op, encargs)'''
55 55 batcher.__init__(self)
56 56 self.remote = remote
57 57 def submit(self):
58 58 req, rsp = [], []
59 59 for name, args, opts, resref in self.calls:
60 60 mtd = getattr(self.remote, name)
61 61 batchablefn = getattr(mtd, 'batchable', None)
62 62 if batchablefn is not None:
63 63 batchable = batchablefn(mtd.im_self, *args, **opts)
64 64 encargsorres, encresref = batchable.next()
65 65 if encresref:
66 66 req.append((name, encargsorres,))
67 67 rsp.append((batchable, encresref, resref,))
68 68 else:
69 69 resref.set(encargsorres)
70 70 else:
71 71 if req:
72 72 self._submitreq(req, rsp)
73 73 req, rsp = [], []
74 74 resref.set(mtd(*args, **opts))
75 75 if req:
76 76 self._submitreq(req, rsp)
77 77 def _submitreq(self, req, rsp):
78 78 encresults = self.remote._submitbatch(req)
79 79 for encres, r in zip(encresults, rsp):
80 80 batchable, encresref, resref = r
81 81 encresref.set(encres)
82 82 resref.set(batchable.next())
83 83
84 84 def batchable(f):
85 85 '''annotation for batchable methods
86 86
87 87 Such methods must implement a coroutine as follows:
88 88
89 89 @batchable
90 90 def sample(self, one, two=None):
91 91 # Handle locally computable results first:
92 92 if not one:
93 93 yield "a local result", None
94 94 # Build list of encoded arguments suitable for your wire protocol:
95 95 encargs = [('one', encode(one),), ('two', encode(two),)]
96 96 # Create future for injection of encoded result:
97 97 encresref = future()
98 98 # Return encoded arguments and future:
99 99 yield encargs, encresref
100 100 # Assuming the future to be filled with the result from the batched
101 101 # request now. Decode it:
102 102 yield decode(encresref.value)
103 103
104 104 The decorator returns a function which wraps this coroutine as a plain
105 105 method, but adds the original method as an attribute called "batchable",
106 106 which is used by remotebatch to split the call into separate encoding and
107 107 decoding phases.
108 108 '''
109 109 def plain(*args, **opts):
110 110 batchable = f(*args, **opts)
111 111 encargsorres, encresref = batchable.next()
112 112 if not encresref:
113 113 return encargsorres # a local result in this case
114 114 self = args[0]
115 115 encresref.set(self._submitone(f.func_name, encargsorres))
116 116 return batchable.next()
117 117 setattr(plain, 'batchable', f)
118 118 return plain
119 119
120 120 # list of nodes encoding / decoding
121 121
122 122 def decodelist(l, sep=' '):
123 123 if l:
124 124 return map(bin, l.split(sep))
125 125 return []
126 126
127 127 def encodelist(l, sep=' '):
128 128 return sep.join(map(hex, l))
129 129
130 130 # batched call argument encoding
131 131
132 132 def escapearg(plain):
133 133 return (plain
134 134 .replace(':', '::')
135 135 .replace(',', ':,')
136 136 .replace(';', ':;')
137 137 .replace('=', ':='))
138 138
139 139 def unescapearg(escaped):
140 140 return (escaped
141 141 .replace(':=', '=')
142 142 .replace(':;', ';')
143 143 .replace(':,', ',')
144 144 .replace('::', ':'))
145 145
146 146 # client side
147 147
148 148 def todict(**args):
149 149 return args
150 150
151 151 class wirepeer(peer.peerrepository):
152 152
153 153 def batch(self):
154 154 return remotebatch(self)
155 155 def _submitbatch(self, req):
156 156 cmds = []
157 157 for op, argsdict in req:
158 158 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
159 159 cmds.append('%s %s' % (op, args))
160 160 rsp = self._call("batch", cmds=';'.join(cmds))
161 161 return rsp.split(';')
162 162 def _submitone(self, op, args):
163 163 return self._call(op, **args)
164 164
165 165 @batchable
166 166 def lookup(self, key):
167 167 self.requirecap('lookup', _('look up remote revision'))
168 168 f = future()
169 169 yield todict(key=encoding.fromlocal(key)), f
170 170 d = f.value
171 171 success, data = d[:-1].split(" ", 1)
172 172 if int(success):
173 173 yield bin(data)
174 174 self._abort(error.RepoError(data))
175 175
176 176 @batchable
177 177 def heads(self):
178 178 f = future()
179 179 yield {}, f
180 180 d = f.value
181 181 try:
182 182 yield decodelist(d[:-1])
183 183 except ValueError:
184 184 self._abort(error.ResponseError(_("unexpected response:"), d))
185 185
186 186 @batchable
187 187 def known(self, nodes):
188 188 f = future()
189 189 yield todict(nodes=encodelist(nodes)), f
190 190 d = f.value
191 191 try:
192 192 yield [bool(int(f)) for f in d]
193 193 except ValueError:
194 194 self._abort(error.ResponseError(_("unexpected response:"), d))
195 195
196 196 @batchable
197 197 def branchmap(self):
198 198 f = future()
199 199 yield {}, f
200 200 d = f.value
201 201 try:
202 202 branchmap = {}
203 203 for branchpart in d.splitlines():
204 204 branchname, branchheads = branchpart.split(' ', 1)
205 205 branchname = encoding.tolocal(urllib.unquote(branchname))
206 206 branchheads = decodelist(branchheads)
207 207 branchmap[branchname] = branchheads
208 208 yield branchmap
209 209 except TypeError:
210 210 self._abort(error.ResponseError(_("unexpected response:"), d))
211 211
212 212 def branches(self, nodes):
213 213 n = encodelist(nodes)
214 214 d = self._call("branches", nodes=n)
215 215 try:
216 216 br = [tuple(decodelist(b)) for b in d.splitlines()]
217 217 return br
218 218 except ValueError:
219 219 self._abort(error.ResponseError(_("unexpected response:"), d))
220 220
221 221 def between(self, pairs):
222 222 batch = 8 # avoid giant requests
223 223 r = []
224 224 for i in xrange(0, len(pairs), batch):
225 225 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
226 226 d = self._call("between", pairs=n)
227 227 try:
228 228 r.extend(l and decodelist(l) or [] for l in d.splitlines())
229 229 except ValueError:
230 230 self._abort(error.ResponseError(_("unexpected response:"), d))
231 231 return r
232 232
233 233 @batchable
234 234 def pushkey(self, namespace, key, old, new):
235 235 if not self.capable('pushkey'):
236 236 yield False, None
237 237 f = future()
238 238 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
239 239 yield todict(namespace=encoding.fromlocal(namespace),
240 240 key=encoding.fromlocal(key),
241 241 old=encoding.fromlocal(old),
242 242 new=encoding.fromlocal(new)), f
243 243 d = f.value
244 244 d, output = d.split('\n', 1)
245 245 try:
246 246 d = bool(int(d))
247 247 except ValueError:
248 248 raise error.ResponseError(
249 249 _('push failed (unexpected response):'), d)
250 250 for l in output.splitlines(True):
251 251 self.ui.status(_('remote: '), l)
252 252 yield d
253 253
254 254 @batchable
255 255 def listkeys(self, namespace):
256 256 if not self.capable('pushkey'):
257 257 yield {}, None
258 258 f = future()
259 259 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
260 260 yield todict(namespace=encoding.fromlocal(namespace)), f
261 261 d = f.value
262 262 r = {}
263 263 for l in d.splitlines():
264 264 k, v = l.split('\t')
265 265 r[encoding.tolocal(k)] = encoding.tolocal(v)
266 266 yield r
267 267
268 268 def stream_out(self):
269 269 return self._callstream('stream_out')
270 270
271 271 def changegroup(self, nodes, kind):
272 272 n = encodelist(nodes)
273 273 f = self._callstream("changegroup", roots=n)
274 274 return changegroupmod.unbundle10(self._decompress(f), 'UN')
275 275
276 276 def changegroupsubset(self, bases, heads, kind):
277 277 self.requirecap('changegroupsubset', _('look up remote changes'))
278 278 bases = encodelist(bases)
279 279 heads = encodelist(heads)
280 280 f = self._callstream("changegroupsubset",
281 281 bases=bases, heads=heads)
282 282 return changegroupmod.unbundle10(self._decompress(f), 'UN')
283 283
284 def getbundle(self, source, heads=None, common=None):
284 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
285 285 self.requirecap('getbundle', _('look up remote changes'))
286 286 opts = {}
287 287 if heads is not None:
288 288 opts['heads'] = encodelist(heads)
289 289 if common is not None:
290 290 opts['common'] = encodelist(common)
291 if bundlecaps is not None:
292 opts['bundlecaps'] = ','.join(bundlecaps)
291 293 f = self._callstream("getbundle", **opts)
292 294 return changegroupmod.unbundle10(self._decompress(f), 'UN')
293 295
294 296 def unbundle(self, cg, heads, source):
295 297 '''Send cg (a readable file-like object representing the
296 298 changegroup to push, typically a chunkbuffer object) to the
297 299 remote server as a bundle. Return an integer indicating the
298 300 result of the push (see localrepository.addchangegroup()).'''
299 301
300 302 if heads != ['force'] and self.capable('unbundlehash'):
301 303 heads = encodelist(['hashed',
302 304 util.sha1(''.join(sorted(heads))).digest()])
303 305 else:
304 306 heads = encodelist(heads)
305 307
306 308 ret, output = self._callpush("unbundle", cg, heads=heads)
307 309 if ret == "":
308 310 raise error.ResponseError(
309 311 _('push failed:'), output)
310 312 try:
311 313 ret = int(ret)
312 314 except ValueError:
313 315 raise error.ResponseError(
314 316 _('push failed (unexpected response):'), ret)
315 317
316 318 for l in output.splitlines(True):
317 319 self.ui.status(_('remote: '), l)
318 320 return ret
319 321
320 322 def debugwireargs(self, one, two, three=None, four=None, five=None):
321 323 # don't pass optional arguments left at their default value
322 324 opts = {}
323 325 if three is not None:
324 326 opts['three'] = three
325 327 if four is not None:
326 328 opts['four'] = four
327 329 return self._call('debugwireargs', one=one, two=two, **opts)
328 330
329 331 # server side
330 332
331 333 class streamres(object):
332 334 def __init__(self, gen):
333 335 self.gen = gen
334 336
335 337 class pushres(object):
336 338 def __init__(self, res):
337 339 self.res = res
338 340
339 341 class pusherr(object):
340 342 def __init__(self, res):
341 343 self.res = res
342 344
343 345 class ooberror(object):
344 346 def __init__(self, message):
345 347 self.message = message
346 348
347 349 def dispatch(repo, proto, command):
348 350 repo = repo.filtered("served")
349 351 func, spec = commands[command]
350 352 args = proto.getargs(spec)
351 353 return func(repo, proto, *args)
352 354
353 355 def options(cmd, keys, others):
354 356 opts = {}
355 357 for k in keys:
356 358 if k in others:
357 359 opts[k] = others[k]
358 360 del others[k]
359 361 if others:
360 362 sys.stderr.write("abort: %s got unexpected arguments %s\n"
361 363 % (cmd, ",".join(others)))
362 364 return opts
363 365
364 366 def batch(repo, proto, cmds, others):
365 367 repo = repo.filtered("served")
366 368 res = []
367 369 for pair in cmds.split(';'):
368 370 op, args = pair.split(' ', 1)
369 371 vals = {}
370 372 for a in args.split(','):
371 373 if a:
372 374 n, v = a.split('=')
373 375 vals[n] = unescapearg(v)
374 376 func, spec = commands[op]
375 377 if spec:
376 378 keys = spec.split()
377 379 data = {}
378 380 for k in keys:
379 381 if k == '*':
380 382 star = {}
381 383 for key in vals.keys():
382 384 if key not in keys:
383 385 star[key] = vals[key]
384 386 data['*'] = star
385 387 else:
386 388 data[k] = vals[k]
387 389 result = func(repo, proto, *[data[k] for k in keys])
388 390 else:
389 391 result = func(repo, proto)
390 392 if isinstance(result, ooberror):
391 393 return result
392 394 res.append(escapearg(result))
393 395 return ';'.join(res)
394 396
395 397 def between(repo, proto, pairs):
396 398 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
397 399 r = []
398 400 for b in repo.between(pairs):
399 401 r.append(encodelist(b) + "\n")
400 402 return "".join(r)
401 403
402 404 def branchmap(repo, proto):
403 405 branchmap = repo.branchmap()
404 406 heads = []
405 407 for branch, nodes in branchmap.iteritems():
406 408 branchname = urllib.quote(encoding.fromlocal(branch))
407 409 branchnodes = encodelist(nodes)
408 410 heads.append('%s %s' % (branchname, branchnodes))
409 411 return '\n'.join(heads)
410 412
411 413 def branches(repo, proto, nodes):
412 414 nodes = decodelist(nodes)
413 415 r = []
414 416 for b in repo.branches(nodes):
415 417 r.append(encodelist(b) + "\n")
416 418 return "".join(r)
417 419
def capabilities(repo, proto):
    """Build the space-separated server capability string."""
    caps = ('lookup changegroupsubset branchmap pushkey known getbundle '
            'unbundlehash batch').split()
    ui = repo.ui
    if _allowstream(ui):
        if ui.configbool('server', 'preferuncompressed', False):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        if requiredformats - set(('revlogv1',)):
            # local revlogs need more than plain revlogv1: advertise the
            # exact requirements so clients can check compatibility
            caps.append('streamreqs=%s' % ','.join(requiredformats))
        else:
            # plain revlogv1 only: the bare 'stream' cap suffices
            caps.append('stream')
    caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
    caps.append('httpheader=1024')
    return ' '.join(caps)
434 436
def changegroup(repo, proto, roots):
    """Stream a changegroup for everything reachable from the given roots."""
    rootnodes = decodelist(roots)
    bundle = repo.changegroup(rootnodes, 'serve')
    return streamres(proto.groupchunks(bundle))
439 441
def changegroupsubset(repo, proto, bases, heads):
    """Stream a changegroup for the revisions between bases and heads."""
    bundle = repo.changegroupsubset(decodelist(bases), decodelist(heads),
                                    'serve')
    return streamres(proto.groupchunks(bundle))
445 447
def debugwireargs(repo, proto, one, two, others):
    """Echo wire arguments back for debugging; optional arguments are
    restricted to the known set ('three', 'four')."""
    extra = options('debugwireargs', ['three', 'four'], others)
    return repo.debugwireargs(one, two, **extra)
450 452
def getbundle(repo, proto, others):
    """Stream a bundle to the client.

    Optional arguments arrive via *others*:
      heads, common -- encoded node lists bounding the outgoing changesets
      bundlecaps    -- comma-separated bundle capabilities advertised by
                       the client
    """
    opts = options('getbundle', ['heads', 'common', 'bundlecaps'], others)
    for k, v in opts.iteritems():
        if k in ('heads', 'common'):
            opts[k] = decodelist(v)
        elif k == 'bundlecaps':
            # capabilities travel as a comma-separated string on the wire;
            # the local API expects a set
            opts[k] = set(v.split(','))
    cg = repo.getbundle('serve', **opts)
    return streamres(proto.groupchunks(cg))
457 462
def heads(repo, proto):
    """Send the repository heads as one encoded node list plus newline."""
    return encodelist(repo.heads()) + "\n"
461 466
def hello(repo, proto):
    '''Describe the server in an RFC822-like "key: value" format.

    Only one key is currently defined: "capabilities", whose value is the
    space-separated capability list.
    '''
    caps = capabilities(repo, proto)
    return "capabilities: %s\n" % (caps)
471 476
def listkeys(repo, proto, namespace):
    """Send all pushkey entries in *namespace* as tab-separated key/value
    pairs, one per line."""
    entries = repo.listkeys(encoding.tolocal(namespace)).items()
    lines = ['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
             for k, v in entries]
    return '\n'.join(lines)
477 482
478 483 def lookup(repo, proto, key):
479 484 try:
480 485 k = encoding.tolocal(key)
481 486 c = repo[k]
482 487 r = c.hex()
483 488 success = 1
484 489 except Exception, inst:
485 490 r = str(inst)
486 491 success = 0
487 492 return "%s %s\n" % (success, r)
488 493
def known(repo, proto, nodes, others):
    """Report which of the requested nodes are known: one '1'/'0' char per
    node, in request order."""
    flags = repo.known(decodelist(nodes))
    return ''.join(b and "1" or "0" for b in flags)
491 496
def pushkey(repo, proto, namespace, key, old, new):
    """Set a pushkey entry; returns '<int>\\n' (plus captured output when
    the protocol supports redirection)."""
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and new.encode('string-escape') != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    namespace = encoding.tolocal(namespace)
    key = encoding.tolocal(key)
    old = encoding.tolocal(old)

    if util.safehasattr(proto, 'restore'):
        # capture any output produced while pushing so it can be relayed
        # back to the client alongside the result
        proto.redirect()
        try:
            r = repo.pushkey(namespace, key, old, new) or False
        except util.Abort:
            r = False
        output = proto.restore()
        return '%s\n%s' % (int(r), output)

    r = repo.pushkey(namespace, key, old, new)
    return '%s\n' % int(r)
522 527
523 528 def _allowstream(ui):
524 529 return ui.configbool('server', 'uncompressed', True, untrusted=True)
525 530
def _walkstreamfiles(repo):
    # Yield the store files to send during a streaming clone.
    # This is its own function so extensions can override it.
    return repo.store.walk()
529 534
def stream(repo, proto):
    '''If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. Client checks to see if it understands the format.

    The format is simple: the server writes out a line with the amount
    of files, then the total amount of bytes to be transferred (separated
    by a space). Then, for each file, the server first writes the filename
    and filesize (separated by the null character), then the file contents.
    '''

    # '1\n' signals "streaming not allowed" to the client
    if not _allowstream(repo.ui):
        return '1\n'

    entries = []
    total_bytes = 0
    try:
        # get consistent snapshot of repo, lock during scan
        lock = repo.lock()
        try:
            repo.ui.debug('scanning\n')
            for name, ename, size in _walkstreamfiles(repo):
                if size:
                    entries.append((name, size))
                    total_bytes += size
        finally:
            lock.release()
    except error.LockError:
        return '2\n' # error: 2

    def streamer(repo, entries, total):
        '''stream out all metadata files in repository.'''
        yield '0\n' # success
        repo.ui.debug('%d files, %d bytes to transfer\n' %
                      (len(entries), total_bytes))
        yield '%d %d\n' % (len(entries), total_bytes)

        sopener = repo.sopener
        oldaudit = sopener.mustaudit
        debugflag = repo.ui.debugflag
        # disable path auditing while streaming: names come from the store
        # walk above, and auditing every open would be redundant overhead
        sopener.mustaudit = False

        try:
            for name, size in entries:
                if debugflag:
                    repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
                # partially encode name over the wire for backwards compat
                yield '%s\0%d\n' % (store.encodedir(name), size)
                # small files are read in one shot; larger ones are chunked
                if size <= 65536:
                    fp = sopener(name)
                    try:
                        data = fp.read(size)
                    finally:
                        fp.close()
                    yield data
                else:
                    for chunk in util.filechunkiter(sopener(name), limit=size):
                        yield chunk
        # replace with "finally:" when support for python 2.4 has been dropped
        except Exception:
            sopener.mustaudit = oldaudit
            raise
        sopener.mustaudit = oldaudit

    return streamres(streamer(repo, entries, total_bytes))
595 600
def unbundle(repo, proto, heads):
    """Receive a pushed bundle from the client and apply it.

    *heads* encodes the heads the client saw; the push is rejected with a
    pusherr if the repository's heads changed in the meantime (unless the
    client sent the special 'force' or 'hashed' forms).
    """
    their_heads = decodelist(heads)

    def check_heads():
        # heads still match what the client based its push on?
        heads = repo.heads()
        heads_hash = util.sha1(''.join(sorted(heads))).digest()
        return (their_heads == ['force'] or their_heads == heads or
                their_heads == ['hashed', heads_hash])

    proto.redirect()

    # fail early if possible
    if not check_heads():
        return pusherr('repository changed while preparing changes - '
                       'please try again')

    # write bundle data to temporary file because it can be big
    fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
    fp = os.fdopen(fd, 'wb+')
    r = 0
    try:
        proto.getfile(fp)
        lock = repo.lock()
        try:
            # re-check under the lock: someone may have pushed while the
            # bundle was being transferred
            if not check_heads():
                # someone else committed/pushed/unbundled while we
                # were transferring data
                return pusherr('repository changed while uploading changes - '
                               'please try again')

            # push can proceed
            fp.seek(0)
            gen = changegroupmod.readbundle(fp, None)

            try:
                r = repo.addchangegroup(gen, 'serve', proto._client())
            except util.Abort, inst:
                sys.stderr.write("abort: %s\n" % inst)
        finally:
            lock.release()
        return pushres(r)

    finally:
        fp.close()
        os.unlink(tempname)
641 646
# Wire protocol dispatch table: command name -> (handler, argument spec).
# The spec is a space-separated list of wire argument names passed
# positionally to the handler; '*' means the handler also receives a dict
# of any remaining (optional/unknown) arguments.
commands = {
    'batch': (batch, 'cmds *'),
    'between': (between, 'pairs'),
    'branchmap': (branchmap, ''),
    'branches': (branches, 'nodes'),
    'capabilities': (capabilities, ''),
    'changegroup': (changegroup, 'roots'),
    'changegroupsubset': (changegroupsubset, 'bases heads'),
    'debugwireargs': (debugwireargs, 'one two *'),
    'getbundle': (getbundle, '*'),
    'heads': (heads, ''),
    'hello': (hello, ''),
    'known': (known, 'nodes *'),
    'listkeys': (listkeys, 'namespace'),
    'lookup': (lookup, 'key'),
    'pushkey': (pushkey, 'namespace key old new'),
    'stream_out': (stream, ''),
    'unbundle': (unbundle, 'heads'),
}
General Comments 0
You need to be logged in to leave comments. Login now