move tags.cache and branchheads.cache to a collected cache folder .hg/cache/...
Author: jfh
Changeset: r13272:5ccdca7d (branch: default)
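
Summary of the visible change: the branch heads cache is now read from and written to a file under a dedicated .hg/cache/ directory instead of a flat file directly under .hg/ (the tags cache mentioned in the commit message is handled outside this file). Below is a minimal sketch of the path difference, assuming names passed to self.opener() resolve relative to the repository's .hg directory as shown in the diff; the variable names are illustrative only:

    import os

    # Only the relative name handed to the opener changes in this commit;
    # the opener still resolves it against the repository's .hg directory.
    old_cache = "branchheads.cache"                   # before: .hg/branchheads.cache
    new_cache = os.path.join("cache", "branchheads")  # after:  .hg/cache/branchheads
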
@@ -1,1937 +1,1938 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None
108 self._branchcache = None
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
127 def _checknested(self, path):
127 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
128 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
129 if not path.startswith(self.root):
130 return False
130 return False
131 subpath = path[len(self.root) + 1:]
131 subpath = path[len(self.root) + 1:]
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = os.sep.join(parts)
153 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == subpath:
155 if prefix == subpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164
164
165 @propertycache
165 @propertycache
166 def changelog(self):
166 def changelog(self):
167 c = changelog.changelog(self.sopener)
167 c = changelog.changelog(self.sopener)
168 if 'HG_PENDING' in os.environ:
168 if 'HG_PENDING' in os.environ:
169 p = os.environ['HG_PENDING']
169 p = os.environ['HG_PENDING']
170 if p.startswith(self.root):
170 if p.startswith(self.root):
171 c.readpending('00changelog.i.a')
171 c.readpending('00changelog.i.a')
172 self.sopener.options['defversion'] = c.version
172 self.sopener.options['defversion'] = c.version
173 return c
173 return c
174
174
175 @propertycache
175 @propertycache
176 def manifest(self):
176 def manifest(self):
177 return manifest.manifest(self.sopener)
177 return manifest.manifest(self.sopener)
178
178
179 @propertycache
179 @propertycache
180 def dirstate(self):
180 def dirstate(self):
181 warned = [0]
181 warned = [0]
182 def validate(node):
182 def validate(node):
183 try:
183 try:
184 r = self.changelog.rev(node)
184 r = self.changelog.rev(node)
185 return node
185 return node
186 except error.LookupError:
186 except error.LookupError:
187 if not warned[0]:
187 if not warned[0]:
188 warned[0] = True
188 warned[0] = True
189 self.ui.warn(_("warning: ignoring unknown"
189 self.ui.warn(_("warning: ignoring unknown"
190 " working parent %s!\n") % short(node))
190 " working parent %s!\n") % short(node))
191 return nullid
191 return nullid
192
192
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194
194
195 def __getitem__(self, changeid):
195 def __getitem__(self, changeid):
196 if changeid is None:
196 if changeid is None:
197 return context.workingctx(self)
197 return context.workingctx(self)
198 return context.changectx(self, changeid)
198 return context.changectx(self, changeid)
199
199
200 def __contains__(self, changeid):
200 def __contains__(self, changeid):
201 try:
201 try:
202 return bool(self.lookup(changeid))
202 return bool(self.lookup(changeid))
203 except error.RepoLookupError:
203 except error.RepoLookupError:
204 return False
204 return False
205
205
206 def __nonzero__(self):
206 def __nonzero__(self):
207 return True
207 return True
208
208
209 def __len__(self):
209 def __len__(self):
210 return len(self.changelog)
210 return len(self.changelog)
211
211
212 def __iter__(self):
212 def __iter__(self):
213 for i in xrange(len(self)):
213 for i in xrange(len(self)):
214 yield i
214 yield i
215
215
216 def url(self):
216 def url(self):
217 return 'file:' + self.root
217 return 'file:' + self.root
218
218
219 def hook(self, name, throw=False, **args):
219 def hook(self, name, throw=False, **args):
220 return hook.hook(self.ui, self, name, throw, **args)
220 return hook.hook(self.ui, self, name, throw, **args)
221
221
222 tag_disallowed = ':\r\n'
222 tag_disallowed = ':\r\n'
223
223
224 def _tag(self, names, node, message, local, user, date, extra={}):
224 def _tag(self, names, node, message, local, user, date, extra={}):
225 if isinstance(names, str):
225 if isinstance(names, str):
226 allchars = names
226 allchars = names
227 names = (names,)
227 names = (names,)
228 else:
228 else:
229 allchars = ''.join(names)
229 allchars = ''.join(names)
230 for c in self.tag_disallowed:
230 for c in self.tag_disallowed:
231 if c in allchars:
231 if c in allchars:
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233
233
234 branches = self.branchmap()
234 branches = self.branchmap()
235 for name in names:
235 for name in names:
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 local=local)
237 local=local)
238 if name in branches:
238 if name in branches:
239 self.ui.warn(_("warning: tag %s conflicts with existing"
239 self.ui.warn(_("warning: tag %s conflicts with existing"
240 " branch name\n") % name)
240 " branch name\n") % name)
241
241
242 def writetags(fp, names, munge, prevtags):
242 def writetags(fp, names, munge, prevtags):
243 fp.seek(0, 2)
243 fp.seek(0, 2)
244 if prevtags and prevtags[-1] != '\n':
244 if prevtags and prevtags[-1] != '\n':
245 fp.write('\n')
245 fp.write('\n')
246 for name in names:
246 for name in names:
247 m = munge and munge(name) or name
247 m = munge and munge(name) or name
248 if self._tagtypes and name in self._tagtypes:
248 if self._tagtypes and name in self._tagtypes:
249 old = self._tags.get(name, nullid)
249 old = self._tags.get(name, nullid)
250 fp.write('%s %s\n' % (hex(old), m))
250 fp.write('%s %s\n' % (hex(old), m))
251 fp.write('%s %s\n' % (hex(node), m))
251 fp.write('%s %s\n' % (hex(node), m))
252 fp.close()
252 fp.close()
253
253
254 prevtags = ''
254 prevtags = ''
255 if local:
255 if local:
256 try:
256 try:
257 fp = self.opener('localtags', 'r+')
257 fp = self.opener('localtags', 'r+')
258 except IOError:
258 except IOError:
259 fp = self.opener('localtags', 'a')
259 fp = self.opener('localtags', 'a')
260 else:
260 else:
261 prevtags = fp.read()
261 prevtags = fp.read()
262
262
263 # local tags are stored in the current charset
263 # local tags are stored in the current charset
264 writetags(fp, names, None, prevtags)
264 writetags(fp, names, None, prevtags)
265 for name in names:
265 for name in names:
266 self.hook('tag', node=hex(node), tag=name, local=local)
266 self.hook('tag', node=hex(node), tag=name, local=local)
267 return
267 return
268
268
269 try:
269 try:
270 fp = self.wfile('.hgtags', 'rb+')
270 fp = self.wfile('.hgtags', 'rb+')
271 except IOError:
271 except IOError:
272 fp = self.wfile('.hgtags', 'ab')
272 fp = self.wfile('.hgtags', 'ab')
273 else:
273 else:
274 prevtags = fp.read()
274 prevtags = fp.read()
275
275
276 # committed tags are stored in UTF-8
276 # committed tags are stored in UTF-8
277 writetags(fp, names, encoding.fromlocal, prevtags)
277 writetags(fp, names, encoding.fromlocal, prevtags)
278
278
279 if '.hgtags' not in self.dirstate:
279 if '.hgtags' not in self.dirstate:
280 self[None].add(['.hgtags'])
280 self[None].add(['.hgtags'])
281
281
282 m = matchmod.exact(self.root, '', ['.hgtags'])
282 m = matchmod.exact(self.root, '', ['.hgtags'])
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
284
284
285 for name in names:
285 for name in names:
286 self.hook('tag', node=hex(node), tag=name, local=local)
286 self.hook('tag', node=hex(node), tag=name, local=local)
287
287
288 return tagnode
288 return tagnode
289
289
290 def tag(self, names, node, message, local, user, date):
290 def tag(self, names, node, message, local, user, date):
291 '''tag a revision with one or more symbolic names.
291 '''tag a revision with one or more symbolic names.
292
292
293 names is a list of strings or, when adding a single tag, names may be a
293 names is a list of strings or, when adding a single tag, names may be a
294 string.
294 string.
295
295
296 if local is True, the tags are stored in a per-repository file.
296 if local is True, the tags are stored in a per-repository file.
297 otherwise, they are stored in the .hgtags file, and a new
297 otherwise, they are stored in the .hgtags file, and a new
298 changeset is committed with the change.
298 changeset is committed with the change.
299
299
300 keyword arguments:
300 keyword arguments:
301
301
302 local: whether to store tags in non-version-controlled file
302 local: whether to store tags in non-version-controlled file
303 (default False)
303 (default False)
304
304
305 message: commit message to use if committing
305 message: commit message to use if committing
306
306
307 user: name of user to use if committing
307 user: name of user to use if committing
308
308
309 date: date tuple to use if committing'''
309 date: date tuple to use if committing'''
310
310
311 if not local:
311 if not local:
312 for x in self.status()[:5]:
312 for x in self.status()[:5]:
313 if '.hgtags' in x:
313 if '.hgtags' in x:
314 raise util.Abort(_('working copy of .hgtags is changed '
314 raise util.Abort(_('working copy of .hgtags is changed '
315 '(please commit .hgtags manually)'))
315 '(please commit .hgtags manually)'))
316
316
317 self.tags() # instantiate the cache
317 self.tags() # instantiate the cache
318 self._tag(names, node, message, local, user, date)
318 self._tag(names, node, message, local, user, date)
319
319
320 def tags(self):
320 def tags(self):
321 '''return a mapping of tag to node'''
321 '''return a mapping of tag to node'''
322 if self._tags is None:
322 if self._tags is None:
323 (self._tags, self._tagtypes) = self._findtags()
323 (self._tags, self._tagtypes) = self._findtags()
324
324
325 return self._tags
325 return self._tags
326
326
327 def _findtags(self):
327 def _findtags(self):
328 '''Do the hard work of finding tags. Return a pair of dicts
328 '''Do the hard work of finding tags. Return a pair of dicts
329 (tags, tagtypes) where tags maps tag name to node, and tagtypes
329 (tags, tagtypes) where tags maps tag name to node, and tagtypes
330 maps tag name to a string like \'global\' or \'local\'.
330 maps tag name to a string like \'global\' or \'local\'.
331 Subclasses or extensions are free to add their own tags, but
331 Subclasses or extensions are free to add their own tags, but
332 should be aware that the returned dicts will be retained for the
332 should be aware that the returned dicts will be retained for the
333 duration of the localrepo object.'''
333 duration of the localrepo object.'''
334
334
335 # XXX what tagtype should subclasses/extensions use? Currently
335 # XXX what tagtype should subclasses/extensions use? Currently
336 # mq and bookmarks add tags, but do not set the tagtype at all.
336 # mq and bookmarks add tags, but do not set the tagtype at all.
337 # Should each extension invent its own tag type? Should there
337 # Should each extension invent its own tag type? Should there
338 # be one tagtype for all such "virtual" tags? Or is the status
338 # be one tagtype for all such "virtual" tags? Or is the status
339 # quo fine?
339 # quo fine?
340
340
341 alltags = {} # map tag name to (node, hist)
341 alltags = {} # map tag name to (node, hist)
342 tagtypes = {}
342 tagtypes = {}
343
343
344 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
344 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
345 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
345 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
346
346
347 # Build the return dicts. Have to re-encode tag names because
347 # Build the return dicts. Have to re-encode tag names because
348 # the tags module always uses UTF-8 (in order not to lose info
348 # the tags module always uses UTF-8 (in order not to lose info
349 # writing to the cache), but the rest of Mercurial wants them in
349 # writing to the cache), but the rest of Mercurial wants them in
350 # local encoding.
350 # local encoding.
351 tags = {}
351 tags = {}
352 for (name, (node, hist)) in alltags.iteritems():
352 for (name, (node, hist)) in alltags.iteritems():
353 if node != nullid:
353 if node != nullid:
354 tags[encoding.tolocal(name)] = node
354 tags[encoding.tolocal(name)] = node
355 tags['tip'] = self.changelog.tip()
355 tags['tip'] = self.changelog.tip()
356 tagtypes = dict([(encoding.tolocal(name), value)
356 tagtypes = dict([(encoding.tolocal(name), value)
357 for (name, value) in tagtypes.iteritems()])
357 for (name, value) in tagtypes.iteritems()])
358 return (tags, tagtypes)
358 return (tags, tagtypes)
359
359
360 def tagtype(self, tagname):
360 def tagtype(self, tagname):
361 '''
361 '''
362 return the type of the given tag. result can be:
362 return the type of the given tag. result can be:
363
363
364 'local' : a local tag
364 'local' : a local tag
365 'global' : a global tag
365 'global' : a global tag
366 None : tag does not exist
366 None : tag does not exist
367 '''
367 '''
368
368
369 self.tags()
369 self.tags()
370
370
371 return self._tagtypes.get(tagname)
371 return self._tagtypes.get(tagname)
372
372
373 def tagslist(self):
373 def tagslist(self):
374 '''return a list of tags ordered by revision'''
374 '''return a list of tags ordered by revision'''
375 l = []
375 l = []
376 for t, n in self.tags().iteritems():
376 for t, n in self.tags().iteritems():
377 try:
377 try:
378 r = self.changelog.rev(n)
378 r = self.changelog.rev(n)
379 except:
379 except:
380 r = -2 # sort to the beginning of the list if unknown
380 r = -2 # sort to the beginning of the list if unknown
381 l.append((r, t, n))
381 l.append((r, t, n))
382 return [(t, n) for r, t, n in sorted(l)]
382 return [(t, n) for r, t, n in sorted(l)]
383
383
384 def nodetags(self, node):
384 def nodetags(self, node):
385 '''return the tags associated with a node'''
385 '''return the tags associated with a node'''
386 if not self.nodetagscache:
386 if not self.nodetagscache:
387 self.nodetagscache = {}
387 self.nodetagscache = {}
388 for t, n in self.tags().iteritems():
388 for t, n in self.tags().iteritems():
389 self.nodetagscache.setdefault(n, []).append(t)
389 self.nodetagscache.setdefault(n, []).append(t)
390 for tags in self.nodetagscache.itervalues():
390 for tags in self.nodetagscache.itervalues():
391 tags.sort()
391 tags.sort()
392 return self.nodetagscache.get(node, [])
392 return self.nodetagscache.get(node, [])
393
393
394 def _branchtags(self, partial, lrev):
394 def _branchtags(self, partial, lrev):
395 # TODO: rename this function?
395 # TODO: rename this function?
396 tiprev = len(self) - 1
396 tiprev = len(self) - 1
397 if lrev != tiprev:
397 if lrev != tiprev:
398 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
398 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
399 self._updatebranchcache(partial, ctxgen)
399 self._updatebranchcache(partial, ctxgen)
400 self._writebranchcache(partial, self.changelog.tip(), tiprev)
400 self._writebranchcache(partial, self.changelog.tip(), tiprev)
401
401
402 return partial
402 return partial
403
403
404 def updatebranchcache(self):
404 def updatebranchcache(self):
405 tip = self.changelog.tip()
405 tip = self.changelog.tip()
406 if self._branchcache is not None and self._branchcachetip == tip:
406 if self._branchcache is not None and self._branchcachetip == tip:
407 return self._branchcache
407 return self._branchcache
408
408
409 oldtip = self._branchcachetip
409 oldtip = self._branchcachetip
410 self._branchcachetip = tip
410 self._branchcachetip = tip
411 if oldtip is None or oldtip not in self.changelog.nodemap:
411 if oldtip is None or oldtip not in self.changelog.nodemap:
412 partial, last, lrev = self._readbranchcache()
412 partial, last, lrev = self._readbranchcache()
413 else:
413 else:
414 lrev = self.changelog.rev(oldtip)
414 lrev = self.changelog.rev(oldtip)
415 partial = self._branchcache
415 partial = self._branchcache
416
416
417 self._branchtags(partial, lrev)
417 self._branchtags(partial, lrev)
418 # this private cache holds all heads (not just tips)
418 # this private cache holds all heads (not just tips)
419 self._branchcache = partial
419 self._branchcache = partial
420
420
421 def branchmap(self):
421 def branchmap(self):
422 '''returns a dictionary {branch: [branchheads]}'''
422 '''returns a dictionary {branch: [branchheads]}'''
423 self.updatebranchcache()
423 self.updatebranchcache()
424 return self._branchcache
424 return self._branchcache
425
425
426 def branchtags(self):
426 def branchtags(self):
427 '''return a dict where branch names map to the tipmost head of
427 '''return a dict where branch names map to the tipmost head of
428 the branch, open heads come before closed'''
428 the branch, open heads come before closed'''
429 bt = {}
429 bt = {}
430 for bn, heads in self.branchmap().iteritems():
430 for bn, heads in self.branchmap().iteritems():
431 tip = heads[-1]
431 tip = heads[-1]
432 for h in reversed(heads):
432 for h in reversed(heads):
433 if 'close' not in self.changelog.read(h)[5]:
433 if 'close' not in self.changelog.read(h)[5]:
434 tip = h
434 tip = h
435 break
435 break
436 bt[bn] = tip
436 bt[bn] = tip
437 return bt
437 return bt
438
438
439 def _readbranchcache(self):
439 def _readbranchcache(self):
440 partial = {}
440 partial = {}
441 try:
441 try:
442 f = self.opener("branchheads.cache")
442 f = self.opener(os.path.join("cache", "branchheads"))
443 lines = f.read().split('\n')
443 lines = f.read().split('\n')
444 f.close()
444 f.close()
445 except (IOError, OSError):
445 except (IOError, OSError):
446 return {}, nullid, nullrev
446 return {}, nullid, nullrev
447
447
448 try:
448 try:
449 last, lrev = lines.pop(0).split(" ", 1)
449 last, lrev = lines.pop(0).split(" ", 1)
450 last, lrev = bin(last), int(lrev)
450 last, lrev = bin(last), int(lrev)
451 if lrev >= len(self) or self[lrev].node() != last:
451 if lrev >= len(self) or self[lrev].node() != last:
452 # invalidate the cache
452 # invalidate the cache
453 raise ValueError('invalidating branch cache (tip differs)')
453 raise ValueError('invalidating branch cache (tip differs)')
454 for l in lines:
454 for l in lines:
455 if not l:
455 if not l:
456 continue
456 continue
457 node, label = l.split(" ", 1)
457 node, label = l.split(" ", 1)
458 label = encoding.tolocal(label.strip())
458 label = encoding.tolocal(label.strip())
459 partial.setdefault(label, []).append(bin(node))
459 partial.setdefault(label, []).append(bin(node))
460 except KeyboardInterrupt:
460 except KeyboardInterrupt:
461 raise
461 raise
462 except Exception, inst:
462 except Exception, inst:
463 if self.ui.debugflag:
463 if self.ui.debugflag:
464 self.ui.warn(str(inst), '\n')
464 self.ui.warn(str(inst), '\n')
465 partial, last, lrev = {}, nullid, nullrev
465 partial, last, lrev = {}, nullid, nullrev
466 return partial, last, lrev
466 return partial, last, lrev
467
467
468 def _writebranchcache(self, branches, tip, tiprev):
468 def _writebranchcache(self, branches, tip, tiprev):
469 try:
469 try:
470 f = self.opener("branchheads.cache", "w", atomictemp=True)
470 f = self.opener(os.path.join("cache", "branchheads"), "w",
471 atomictemp=True)
471 f.write("%s %s\n" % (hex(tip), tiprev))
472 f.write("%s %s\n" % (hex(tip), tiprev))
472 for label, nodes in branches.iteritems():
473 for label, nodes in branches.iteritems():
473 for node in nodes:
474 for node in nodes:
474 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
475 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
475 f.rename()
476 f.rename()
476 except (IOError, OSError):
477 except (IOError, OSError):
477 pass
478 pass
478
479
479 def _updatebranchcache(self, partial, ctxgen):
480 def _updatebranchcache(self, partial, ctxgen):
480 # collect new branch entries
481 # collect new branch entries
481 newbranches = {}
482 newbranches = {}
482 for c in ctxgen:
483 for c in ctxgen:
483 newbranches.setdefault(c.branch(), []).append(c.node())
484 newbranches.setdefault(c.branch(), []).append(c.node())
484 # if older branchheads are reachable from new ones, they aren't
485 # if older branchheads are reachable from new ones, they aren't
485 # really branchheads. Note checking parents is insufficient:
486 # really branchheads. Note checking parents is insufficient:
486 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
487 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
487 for branch, newnodes in newbranches.iteritems():
488 for branch, newnodes in newbranches.iteritems():
488 bheads = partial.setdefault(branch, [])
489 bheads = partial.setdefault(branch, [])
489 bheads.extend(newnodes)
490 bheads.extend(newnodes)
490 if len(bheads) <= 1:
491 if len(bheads) <= 1:
491 continue
492 continue
492 # starting from tip means fewer passes over reachable
493 # starting from tip means fewer passes over reachable
493 while newnodes:
494 while newnodes:
494 latest = newnodes.pop()
495 latest = newnodes.pop()
495 if latest not in bheads:
496 if latest not in bheads:
496 continue
497 continue
497 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
498 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
498 reachable = self.changelog.reachable(latest, minbhrev)
499 reachable = self.changelog.reachable(latest, minbhrev)
499 reachable.remove(latest)
500 reachable.remove(latest)
500 bheads = [b for b in bheads if b not in reachable]
501 bheads = [b for b in bheads if b not in reachable]
501 partial[branch] = bheads
502 partial[branch] = bheads
502
503
503 def lookup(self, key):
504 def lookup(self, key):
504 if isinstance(key, int):
505 if isinstance(key, int):
505 return self.changelog.node(key)
506 return self.changelog.node(key)
506 elif key == '.':
507 elif key == '.':
507 return self.dirstate.parents()[0]
508 return self.dirstate.parents()[0]
508 elif key == 'null':
509 elif key == 'null':
509 return nullid
510 return nullid
510 elif key == 'tip':
511 elif key == 'tip':
511 return self.changelog.tip()
512 return self.changelog.tip()
512 n = self.changelog._match(key)
513 n = self.changelog._match(key)
513 if n:
514 if n:
514 return n
515 return n
515 if key in self.tags():
516 if key in self.tags():
516 return self.tags()[key]
517 return self.tags()[key]
517 if key in self.branchtags():
518 if key in self.branchtags():
518 return self.branchtags()[key]
519 return self.branchtags()[key]
519 n = self.changelog._partialmatch(key)
520 n = self.changelog._partialmatch(key)
520 if n:
521 if n:
521 return n
522 return n
522
523
523 # can't find key, check if it might have come from damaged dirstate
524 # can't find key, check if it might have come from damaged dirstate
524 if key in self.dirstate.parents():
525 if key in self.dirstate.parents():
525 raise error.Abort(_("working directory has unknown parent '%s'!")
526 raise error.Abort(_("working directory has unknown parent '%s'!")
526 % short(key))
527 % short(key))
527 try:
528 try:
528 if len(key) == 20:
529 if len(key) == 20:
529 key = hex(key)
530 key = hex(key)
530 except:
531 except:
531 pass
532 pass
532 raise error.RepoLookupError(_("unknown revision '%s'") % key)
533 raise error.RepoLookupError(_("unknown revision '%s'") % key)
533
534
534 def lookupbranch(self, key, remote=None):
535 def lookupbranch(self, key, remote=None):
535 repo = remote or self
536 repo = remote or self
536 if key in repo.branchmap():
537 if key in repo.branchmap():
537 return key
538 return key
538
539
539 repo = (remote and remote.local()) and remote or self
540 repo = (remote and remote.local()) and remote or self
540 return repo[key].branch()
541 return repo[key].branch()
541
542
542 def local(self):
543 def local(self):
543 return True
544 return True
544
545
545 def join(self, f):
546 def join(self, f):
546 return os.path.join(self.path, f)
547 return os.path.join(self.path, f)
547
548
548 def wjoin(self, f):
549 def wjoin(self, f):
549 return os.path.join(self.root, f)
550 return os.path.join(self.root, f)
550
551
551 def file(self, f):
552 def file(self, f):
552 if f[0] == '/':
553 if f[0] == '/':
553 f = f[1:]
554 f = f[1:]
554 return filelog.filelog(self.sopener, f)
555 return filelog.filelog(self.sopener, f)
555
556
556 def changectx(self, changeid):
557 def changectx(self, changeid):
557 return self[changeid]
558 return self[changeid]
558
559
559 def parents(self, changeid=None):
560 def parents(self, changeid=None):
560 '''get list of changectxs for parents of changeid'''
561 '''get list of changectxs for parents of changeid'''
561 return self[changeid].parents()
562 return self[changeid].parents()
562
563
563 def filectx(self, path, changeid=None, fileid=None):
564 def filectx(self, path, changeid=None, fileid=None):
564 """changeid can be a changeset revision, node, or tag.
565 """changeid can be a changeset revision, node, or tag.
565 fileid can be a file revision or node."""
566 fileid can be a file revision or node."""
566 return context.filectx(self, path, changeid, fileid)
567 return context.filectx(self, path, changeid, fileid)
567
568
568 def getcwd(self):
569 def getcwd(self):
569 return self.dirstate.getcwd()
570 return self.dirstate.getcwd()
570
571
571 def pathto(self, f, cwd=None):
572 def pathto(self, f, cwd=None):
572 return self.dirstate.pathto(f, cwd)
573 return self.dirstate.pathto(f, cwd)
573
574
574 def wfile(self, f, mode='r'):
575 def wfile(self, f, mode='r'):
575 return self.wopener(f, mode)
576 return self.wopener(f, mode)
576
577
577 def _link(self, f):
578 def _link(self, f):
578 return os.path.islink(self.wjoin(f))
579 return os.path.islink(self.wjoin(f))
579
580
580 def _loadfilter(self, filter):
581 def _loadfilter(self, filter):
581 if filter not in self.filterpats:
582 if filter not in self.filterpats:
582 l = []
583 l = []
583 for pat, cmd in self.ui.configitems(filter):
584 for pat, cmd in self.ui.configitems(filter):
584 if cmd == '!':
585 if cmd == '!':
585 continue
586 continue
586 mf = matchmod.match(self.root, '', [pat])
587 mf = matchmod.match(self.root, '', [pat])
587 fn = None
588 fn = None
588 params = cmd
589 params = cmd
589 for name, filterfn in self._datafilters.iteritems():
590 for name, filterfn in self._datafilters.iteritems():
590 if cmd.startswith(name):
591 if cmd.startswith(name):
591 fn = filterfn
592 fn = filterfn
592 params = cmd[len(name):].lstrip()
593 params = cmd[len(name):].lstrip()
593 break
594 break
594 if not fn:
595 if not fn:
595 fn = lambda s, c, **kwargs: util.filter(s, c)
596 fn = lambda s, c, **kwargs: util.filter(s, c)
596 # Wrap old filters not supporting keyword arguments
597 # Wrap old filters not supporting keyword arguments
597 if not inspect.getargspec(fn)[2]:
598 if not inspect.getargspec(fn)[2]:
598 oldfn = fn
599 oldfn = fn
599 fn = lambda s, c, **kwargs: oldfn(s, c)
600 fn = lambda s, c, **kwargs: oldfn(s, c)
600 l.append((mf, fn, params))
601 l.append((mf, fn, params))
601 self.filterpats[filter] = l
602 self.filterpats[filter] = l
602 return self.filterpats[filter]
603 return self.filterpats[filter]
603
604
604 def _filter(self, filterpats, filename, data):
605 def _filter(self, filterpats, filename, data):
605 for mf, fn, cmd in filterpats:
606 for mf, fn, cmd in filterpats:
606 if mf(filename):
607 if mf(filename):
607 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
608 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
608 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
609 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
609 break
610 break
610
611
611 return data
612 return data
612
613
613 @propertycache
614 @propertycache
614 def _encodefilterpats(self):
615 def _encodefilterpats(self):
615 return self._loadfilter('encode')
616 return self._loadfilter('encode')
616
617
617 @propertycache
618 @propertycache
618 def _decodefilterpats(self):
619 def _decodefilterpats(self):
619 return self._loadfilter('decode')
620 return self._loadfilter('decode')
620
621
621 def adddatafilter(self, name, filter):
622 def adddatafilter(self, name, filter):
622 self._datafilters[name] = filter
623 self._datafilters[name] = filter
623
624
624 def wread(self, filename):
625 def wread(self, filename):
625 if self._link(filename):
626 if self._link(filename):
626 data = os.readlink(self.wjoin(filename))
627 data = os.readlink(self.wjoin(filename))
627 else:
628 else:
628 data = self.wopener(filename, 'r').read()
629 data = self.wopener(filename, 'r').read()
629 return self._filter(self._encodefilterpats, filename, data)
630 return self._filter(self._encodefilterpats, filename, data)
630
631
631 def wwrite(self, filename, data, flags):
632 def wwrite(self, filename, data, flags):
632 data = self._filter(self._decodefilterpats, filename, data)
633 data = self._filter(self._decodefilterpats, filename, data)
633 if 'l' in flags:
634 if 'l' in flags:
634 self.wopener.symlink(data, filename)
635 self.wopener.symlink(data, filename)
635 else:
636 else:
636 self.wopener(filename, 'w').write(data)
637 self.wopener(filename, 'w').write(data)
637 if 'x' in flags:
638 if 'x' in flags:
638 util.set_flags(self.wjoin(filename), False, True)
639 util.set_flags(self.wjoin(filename), False, True)
639
640
640 def wwritedata(self, filename, data):
641 def wwritedata(self, filename, data):
641 return self._filter(self._decodefilterpats, filename, data)
642 return self._filter(self._decodefilterpats, filename, data)
642
643
643 def transaction(self, desc):
644 def transaction(self, desc):
644 tr = self._transref and self._transref() or None
645 tr = self._transref and self._transref() or None
645 if tr and tr.running():
646 if tr and tr.running():
646 return tr.nest()
647 return tr.nest()
647
648
648 # abort here if the journal already exists
649 # abort here if the journal already exists
649 if os.path.exists(self.sjoin("journal")):
650 if os.path.exists(self.sjoin("journal")):
650 raise error.RepoError(
651 raise error.RepoError(
651 _("abandoned transaction found - run hg recover"))
652 _("abandoned transaction found - run hg recover"))
652
653
653 # save dirstate for rollback
654 # save dirstate for rollback
654 try:
655 try:
655 ds = self.opener("dirstate").read()
656 ds = self.opener("dirstate").read()
656 except IOError:
657 except IOError:
657 ds = ""
658 ds = ""
658 self.opener("journal.dirstate", "w").write(ds)
659 self.opener("journal.dirstate", "w").write(ds)
659 self.opener("journal.branch", "w").write(
660 self.opener("journal.branch", "w").write(
660 encoding.fromlocal(self.dirstate.branch()))
661 encoding.fromlocal(self.dirstate.branch()))
661 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
662 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
662
663
663 renames = [(self.sjoin("journal"), self.sjoin("undo")),
664 renames = [(self.sjoin("journal"), self.sjoin("undo")),
664 (self.join("journal.dirstate"), self.join("undo.dirstate")),
665 (self.join("journal.dirstate"), self.join("undo.dirstate")),
665 (self.join("journal.branch"), self.join("undo.branch")),
666 (self.join("journal.branch"), self.join("undo.branch")),
666 (self.join("journal.desc"), self.join("undo.desc"))]
667 (self.join("journal.desc"), self.join("undo.desc"))]
667 tr = transaction.transaction(self.ui.warn, self.sopener,
668 tr = transaction.transaction(self.ui.warn, self.sopener,
668 self.sjoin("journal"),
669 self.sjoin("journal"),
669 aftertrans(renames),
670 aftertrans(renames),
670 self.store.createmode)
671 self.store.createmode)
671 self._transref = weakref.ref(tr)
672 self._transref = weakref.ref(tr)
672 return tr
673 return tr
673
674
674 def recover(self):
675 def recover(self):
675 lock = self.lock()
676 lock = self.lock()
676 try:
677 try:
677 if os.path.exists(self.sjoin("journal")):
678 if os.path.exists(self.sjoin("journal")):
678 self.ui.status(_("rolling back interrupted transaction\n"))
679 self.ui.status(_("rolling back interrupted transaction\n"))
679 transaction.rollback(self.sopener, self.sjoin("journal"),
680 transaction.rollback(self.sopener, self.sjoin("journal"),
680 self.ui.warn)
681 self.ui.warn)
681 self.invalidate()
682 self.invalidate()
682 return True
683 return True
683 else:
684 else:
684 self.ui.warn(_("no interrupted transaction available\n"))
685 self.ui.warn(_("no interrupted transaction available\n"))
685 return False
686 return False
686 finally:
687 finally:
687 lock.release()
688 lock.release()
688
689
689 def rollback(self, dryrun=False):
690 def rollback(self, dryrun=False):
690 wlock = lock = None
691 wlock = lock = None
691 try:
692 try:
692 wlock = self.wlock()
693 wlock = self.wlock()
693 lock = self.lock()
694 lock = self.lock()
694 if os.path.exists(self.sjoin("undo")):
695 if os.path.exists(self.sjoin("undo")):
695 try:
696 try:
696 args = self.opener("undo.desc", "r").read().splitlines()
697 args = self.opener("undo.desc", "r").read().splitlines()
697 if len(args) >= 3 and self.ui.verbose:
698 if len(args) >= 3 and self.ui.verbose:
698 desc = _("rolling back to revision %s"
699 desc = _("rolling back to revision %s"
699 " (undo %s: %s)\n") % (
700 " (undo %s: %s)\n") % (
700 int(args[0]) - 1, args[1], args[2])
701 int(args[0]) - 1, args[1], args[2])
701 elif len(args) >= 2:
702 elif len(args) >= 2:
702 desc = _("rolling back to revision %s (undo %s)\n") % (
703 desc = _("rolling back to revision %s (undo %s)\n") % (
703 int(args[0]) - 1, args[1])
704 int(args[0]) - 1, args[1])
704 except IOError:
705 except IOError:
705 desc = _("rolling back unknown transaction\n")
706 desc = _("rolling back unknown transaction\n")
706 self.ui.status(desc)
707 self.ui.status(desc)
707 if dryrun:
708 if dryrun:
708 return
709 return
709 transaction.rollback(self.sopener, self.sjoin("undo"),
710 transaction.rollback(self.sopener, self.sjoin("undo"),
710 self.ui.warn)
711 self.ui.warn)
711 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
712 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
712 try:
713 try:
713 branch = self.opener("undo.branch").read()
714 branch = self.opener("undo.branch").read()
714 self.dirstate.setbranch(branch)
715 self.dirstate.setbranch(branch)
715 except IOError:
716 except IOError:
716 self.ui.warn(_("Named branch could not be reset, "
717 self.ui.warn(_("Named branch could not be reset, "
717 "current branch still is: %s\n")
718 "current branch still is: %s\n")
718 % self.dirstate.branch())
719 % self.dirstate.branch())
719 self.invalidate()
720 self.invalidate()
720 self.dirstate.invalidate()
721 self.dirstate.invalidate()
721 self.destroyed()
722 self.destroyed()
722 else:
723 else:
723 self.ui.warn(_("no rollback information available\n"))
724 self.ui.warn(_("no rollback information available\n"))
724 return 1
725 return 1
725 finally:
726 finally:
726 release(lock, wlock)
727 release(lock, wlock)
727
728
728 def invalidatecaches(self):
729 def invalidatecaches(self):
729 self._tags = None
730 self._tags = None
730 self._tagtypes = None
731 self._tagtypes = None
731 self.nodetagscache = None
732 self.nodetagscache = None
732 self._branchcache = None # in UTF-8
733 self._branchcache = None # in UTF-8
733 self._branchcachetip = None
734 self._branchcachetip = None
734
735
735 def invalidate(self):
736 def invalidate(self):
736 for a in ("changelog", "manifest"):
737 for a in ("changelog", "manifest"):
737 if a in self.__dict__:
738 if a in self.__dict__:
738 delattr(self, a)
739 delattr(self, a)
739 self.invalidatecaches()
740 self.invalidatecaches()
740
741
741 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
742 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
742 try:
743 try:
743 l = lock.lock(lockname, 0, releasefn, desc=desc)
744 l = lock.lock(lockname, 0, releasefn, desc=desc)
744 except error.LockHeld, inst:
745 except error.LockHeld, inst:
745 if not wait:
746 if not wait:
746 raise
747 raise
747 self.ui.warn(_("waiting for lock on %s held by %r\n") %
748 self.ui.warn(_("waiting for lock on %s held by %r\n") %
748 (desc, inst.locker))
749 (desc, inst.locker))
749 # default to 600 seconds timeout
750 # default to 600 seconds timeout
750 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
751 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
751 releasefn, desc=desc)
752 releasefn, desc=desc)
752 if acquirefn:
753 if acquirefn:
753 acquirefn()
754 acquirefn()
754 return l
755 return l
755
756
756 def lock(self, wait=True):
757 def lock(self, wait=True):
757 '''Lock the repository store (.hg/store) and return a weak reference
758 '''Lock the repository store (.hg/store) and return a weak reference
758 to the lock. Use this before modifying the store (e.g. committing or
759 to the lock. Use this before modifying the store (e.g. committing or
759 stripping). If you are opening a transaction, get a lock as well.)'''
760 stripping). If you are opening a transaction, get a lock as well.)'''
760 l = self._lockref and self._lockref()
761 l = self._lockref and self._lockref()
761 if l is not None and l.held:
762 if l is not None and l.held:
762 l.lock()
763 l.lock()
763 return l
764 return l
764
765
765 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
766 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
766 _('repository %s') % self.origroot)
767 _('repository %s') % self.origroot)
767 self._lockref = weakref.ref(l)
768 self._lockref = weakref.ref(l)
768 return l
769 return l
769
770
770 def wlock(self, wait=True):
771 def wlock(self, wait=True):
771 '''Lock the non-store parts of the repository (everything under
772 '''Lock the non-store parts of the repository (everything under
772 .hg except .hg/store) and return a weak reference to the lock.
773 .hg except .hg/store) and return a weak reference to the lock.
773 Use this before modifying files in .hg.'''
774 Use this before modifying files in .hg.'''
774 l = self._wlockref and self._wlockref()
775 l = self._wlockref and self._wlockref()
775 if l is not None and l.held:
776 if l is not None and l.held:
776 l.lock()
777 l.lock()
777 return l
778 return l
778
779
779 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
780 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
780 self.dirstate.invalidate, _('working directory of %s') %
781 self.dirstate.invalidate, _('working directory of %s') %
781 self.origroot)
782 self.origroot)
782 self._wlockref = weakref.ref(l)
783 self._wlockref = weakref.ref(l)
783 return l
784 return l
784
785
785 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
786 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
786 """
787 """
787 commit an individual file as part of a larger transaction
788 commit an individual file as part of a larger transaction
788 """
789 """
789
790
790 fname = fctx.path()
791 fname = fctx.path()
791 text = fctx.data()
792 text = fctx.data()
792 flog = self.file(fname)
793 flog = self.file(fname)
793 fparent1 = manifest1.get(fname, nullid)
794 fparent1 = manifest1.get(fname, nullid)
794 fparent2 = fparent2o = manifest2.get(fname, nullid)
795 fparent2 = fparent2o = manifest2.get(fname, nullid)
795
796
796 meta = {}
797 meta = {}
797 copy = fctx.renamed()
798 copy = fctx.renamed()
798 if copy and copy[0] != fname:
799 if copy and copy[0] != fname:
799 # Mark the new revision of this file as a copy of another
800 # Mark the new revision of this file as a copy of another
800 # file. This copy data will effectively act as a parent
801 # file. This copy data will effectively act as a parent
801 # of this new revision. If this is a merge, the first
802 # of this new revision. If this is a merge, the first
802 # parent will be the nullid (meaning "look up the copy data")
803 # parent will be the nullid (meaning "look up the copy data")
803 # and the second one will be the other parent. For example:
804 # and the second one will be the other parent. For example:
804 #
805 #
805 # 0 --- 1 --- 3 rev1 changes file foo
806 # 0 --- 1 --- 3 rev1 changes file foo
806 # \ / rev2 renames foo to bar and changes it
807 # \ / rev2 renames foo to bar and changes it
807 # \- 2 -/ rev3 should have bar with all changes and
808 # \- 2 -/ rev3 should have bar with all changes and
808 # should record that bar descends from
809 # should record that bar descends from
809 # bar in rev2 and foo in rev1
810 # bar in rev2 and foo in rev1
810 #
811 #
811 # this allows this merge to succeed:
812 # this allows this merge to succeed:
812 #
813 #
813 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
814 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
814 # \ / merging rev3 and rev4 should use bar@rev2
815 # \ / merging rev3 and rev4 should use bar@rev2
815 # \- 2 --- 4 as the merge base
816 # \- 2 --- 4 as the merge base
816 #
817 #
817
818
818 cfname = copy[0]
819 cfname = copy[0]
819 crev = manifest1.get(cfname)
820 crev = manifest1.get(cfname)
820 newfparent = fparent2
821 newfparent = fparent2
821
822
822 if manifest2: # branch merge
823 if manifest2: # branch merge
823 if fparent2 == nullid or crev is None: # copied on remote side
824 if fparent2 == nullid or crev is None: # copied on remote side
824 if cfname in manifest2:
825 if cfname in manifest2:
825 crev = manifest2[cfname]
826 crev = manifest2[cfname]
826 newfparent = fparent1
827 newfparent = fparent1
827
828
828 # find source in nearest ancestor if we've lost track
829 # find source in nearest ancestor if we've lost track
829 if not crev:
830 if not crev:
830 self.ui.debug(" %s: searching for copy revision for %s\n" %
831 self.ui.debug(" %s: searching for copy revision for %s\n" %
831 (fname, cfname))
832 (fname, cfname))
832 for ancestor in self[None].ancestors():
833 for ancestor in self[None].ancestors():
833 if cfname in ancestor:
834 if cfname in ancestor:
834 crev = ancestor[cfname].filenode()
835 crev = ancestor[cfname].filenode()
835 break
836 break
836
837
837 if crev:
838 if crev:
838 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
839 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
839 meta["copy"] = cfname
840 meta["copy"] = cfname
840 meta["copyrev"] = hex(crev)
841 meta["copyrev"] = hex(crev)
841 fparent1, fparent2 = nullid, newfparent
842 fparent1, fparent2 = nullid, newfparent
842 else:
843 else:
843 self.ui.warn(_("warning: can't find ancestor for '%s' "
844 self.ui.warn(_("warning: can't find ancestor for '%s' "
844 "copied from '%s'!\n") % (fname, cfname))
845 "copied from '%s'!\n") % (fname, cfname))
845
846
846 elif fparent2 != nullid:
847 elif fparent2 != nullid:
847 # is one parent an ancestor of the other?
848 # is one parent an ancestor of the other?
848 fparentancestor = flog.ancestor(fparent1, fparent2)
849 fparentancestor = flog.ancestor(fparent1, fparent2)
849 if fparentancestor == fparent1:
850 if fparentancestor == fparent1:
850 fparent1, fparent2 = fparent2, nullid
851 fparent1, fparent2 = fparent2, nullid
851 elif fparentancestor == fparent2:
852 elif fparentancestor == fparent2:
852 fparent2 = nullid
853 fparent2 = nullid
853
854
854 # is the file changed?
855 # is the file changed?
855 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
856 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
856 changelist.append(fname)
857 changelist.append(fname)
857 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
858 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
858
859
859 # are just the flags changed during merge?
860 # are just the flags changed during merge?
860 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
861 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
861 changelist.append(fname)
862 changelist.append(fname)
862
863
863 return fparent1
864 return fparent1
864
865
865 def commit(self, text="", user=None, date=None, match=None, force=False,
866 def commit(self, text="", user=None, date=None, match=None, force=False,
866 editor=False, extra={}):
867 editor=False, extra={}):
867 """Add a new revision to current repository.
868 """Add a new revision to current repository.
868
869
869 Revision information is gathered from the working directory,
870 Revision information is gathered from the working directory,
870 match can be used to filter the committed files. If editor is
871 match can be used to filter the committed files. If editor is
871 supplied, it is called to get a commit message.
872 supplied, it is called to get a commit message.
872 """
873 """
873
874
874 def fail(f, msg):
875 def fail(f, msg):
875 raise util.Abort('%s: %s' % (f, msg))
876 raise util.Abort('%s: %s' % (f, msg))
876
877
877 if not match:
878 if not match:
878 match = matchmod.always(self.root, '')
879 match = matchmod.always(self.root, '')
879
880
880 if not force:
881 if not force:
881 vdirs = []
882 vdirs = []
882 match.dir = vdirs.append
883 match.dir = vdirs.append
883 match.bad = fail
884 match.bad = fail
884
885
885 wlock = self.wlock()
886 wlock = self.wlock()
886 try:
887 try:
887 wctx = self[None]
888 wctx = self[None]
888 merge = len(wctx.parents()) > 1
889 merge = len(wctx.parents()) > 1
889
890
890 if (not force and merge and match and
891 if (not force and merge and match and
891 (match.files() or match.anypats())):
892 (match.files() or match.anypats())):
892 raise util.Abort(_('cannot partially commit a merge '
893 raise util.Abort(_('cannot partially commit a merge '
893 '(do not specify files or patterns)'))
894 '(do not specify files or patterns)'))
894
895
895 changes = self.status(match=match, clean=force)
896 changes = self.status(match=match, clean=force)
896 if force:
897 if force:
897 changes[0].extend(changes[6]) # mq may commit unchanged files
898 changes[0].extend(changes[6]) # mq may commit unchanged files
898
899
899 # check subrepos
900 # check subrepos
900 subs = []
901 subs = []
901 removedsubs = set()
902 removedsubs = set()
902 for p in wctx.parents():
903 for p in wctx.parents():
903 removedsubs.update(s for s in p.substate if match(s))
904 removedsubs.update(s for s in p.substate if match(s))
904 for s in wctx.substate:
905 for s in wctx.substate:
905 removedsubs.discard(s)
906 removedsubs.discard(s)
906 if match(s) and wctx.sub(s).dirty():
907 if match(s) and wctx.sub(s).dirty():
907 subs.append(s)
908 subs.append(s)
908 if (subs or removedsubs):
909 if (subs or removedsubs):
909 if (not match('.hgsub') and
910 if (not match('.hgsub') and
910 '.hgsub' in (wctx.modified() + wctx.added())):
911 '.hgsub' in (wctx.modified() + wctx.added())):
911 raise util.Abort(_("can't commit subrepos without .hgsub"))
912 raise util.Abort(_("can't commit subrepos without .hgsub"))
912 if '.hgsubstate' not in changes[0]:
913 if '.hgsubstate' not in changes[0]:
913 changes[0].insert(0, '.hgsubstate')
914 changes[0].insert(0, '.hgsubstate')
914
915
915 # make sure all explicit patterns are matched
916 # make sure all explicit patterns are matched
916 if not force and match.files():
917 if not force and match.files():
917 matched = set(changes[0] + changes[1] + changes[2])
918 matched = set(changes[0] + changes[1] + changes[2])
918
919
919 for f in match.files():
920 for f in match.files():
920 if f == '.' or f in matched or f in wctx.substate:
921 if f == '.' or f in matched or f in wctx.substate:
921 continue
922 continue
922 if f in changes[3]: # missing
923 if f in changes[3]: # missing
923 fail(f, _('file not found!'))
924 fail(f, _('file not found!'))
924 if f in vdirs: # visited directory
925 if f in vdirs: # visited directory
925 d = f + '/'
926 d = f + '/'
926 for mf in matched:
927 for mf in matched:
927 if mf.startswith(d):
928 if mf.startswith(d):
928 break
929 break
929 else:
930 else:
930 fail(f, _("no match under directory!"))
931 fail(f, _("no match under directory!"))
931 elif f not in self.dirstate:
932 elif f not in self.dirstate:
932 fail(f, _("file not tracked!"))
933 fail(f, _("file not tracked!"))
933
934
934 if (not force and not extra.get("close") and not merge
935 if (not force and not extra.get("close") and not merge
935 and not (changes[0] or changes[1] or changes[2])
936 and not (changes[0] or changes[1] or changes[2])
936 and wctx.branch() == wctx.p1().branch()):
937 and wctx.branch() == wctx.p1().branch()):
937 return None
938 return None
938
939
939 ms = mergemod.mergestate(self)
940 ms = mergemod.mergestate(self)
940 for f in changes[0]:
941 for f in changes[0]:
941 if f in ms and ms[f] == 'u':
942 if f in ms and ms[f] == 'u':
942 raise util.Abort(_("unresolved merge conflicts "
943 raise util.Abort(_("unresolved merge conflicts "
943 "(see hg resolve)"))
944 "(see hg resolve)"))
944
945
945 cctx = context.workingctx(self, text, user, date, extra, changes)
946 cctx = context.workingctx(self, text, user, date, extra, changes)
946 if editor:
947 if editor:
947 cctx._text = editor(self, cctx, subs)
948 cctx._text = editor(self, cctx, subs)
948 edited = (text != cctx._text)
949 edited = (text != cctx._text)
949
950
950 # commit subs
951 # commit subs
951 if subs or removedsubs:
952 if subs or removedsubs:
952 state = wctx.substate.copy()
953 state = wctx.substate.copy()
953 for s in sorted(subs):
954 for s in sorted(subs):
954 sub = wctx.sub(s)
955 sub = wctx.sub(s)
955 self.ui.status(_('committing subrepository %s\n') %
956 self.ui.status(_('committing subrepository %s\n') %
956 subrepo.subrelpath(sub))
957 subrepo.subrelpath(sub))
957 sr = sub.commit(cctx._text, user, date)
958 sr = sub.commit(cctx._text, user, date)
958 state[s] = (state[s][0], sr)
959 state[s] = (state[s][0], sr)
959 subrepo.writestate(self, state)
960 subrepo.writestate(self, state)
960
961
961 # Save commit message in case this transaction gets rolled back
962 # Save commit message in case this transaction gets rolled back
962 # (e.g. by a pretxncommit hook). Leave the content alone on
963 # (e.g. by a pretxncommit hook). Leave the content alone on
963 # the assumption that the user will use the same editor again.
964 # the assumption that the user will use the same editor again.
964 msgfile = self.opener('last-message.txt', 'wb')
965 msgfile = self.opener('last-message.txt', 'wb')
965 msgfile.write(cctx._text)
966 msgfile.write(cctx._text)
966 msgfile.close()
967 msgfile.close()
967
968
968 p1, p2 = self.dirstate.parents()
969 p1, p2 = self.dirstate.parents()
969 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
970 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
970 try:
971 try:
971 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
972 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
972 ret = self.commitctx(cctx, True)
973 ret = self.commitctx(cctx, True)
973 except:
974 except:
974 if edited:
975 if edited:
975 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
976 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
976 self.ui.write(
977 self.ui.write(
977 _('note: commit message saved in %s\n') % msgfn)
978 _('note: commit message saved in %s\n') % msgfn)
978 raise
979 raise
979
980
980 # update dirstate and mergestate
981 # update dirstate and mergestate
981 for f in changes[0] + changes[1]:
982 for f in changes[0] + changes[1]:
982 self.dirstate.normal(f)
983 self.dirstate.normal(f)
983 for f in changes[2]:
984 for f in changes[2]:
984 self.dirstate.forget(f)
985 self.dirstate.forget(f)
985 self.dirstate.setparents(ret)
986 self.dirstate.setparents(ret)
986 ms.reset()
987 ms.reset()
987 finally:
988 finally:
988 wlock.release()
989 wlock.release()
989
990
990 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
991 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
991 return ret
992 return ret
992
993
993 def commitctx(self, ctx, error=False):
994 def commitctx(self, ctx, error=False):
994 """Add a new revision to current repository.
995 """Add a new revision to current repository.
995 Revision information is passed via the context argument.
996 Revision information is passed via the context argument.
996 """
997 """
997
998
998 tr = lock = None
999 tr = lock = None
999 removed = list(ctx.removed())
1000 removed = list(ctx.removed())
1000 p1, p2 = ctx.p1(), ctx.p2()
1001 p1, p2 = ctx.p1(), ctx.p2()
1001 m1 = p1.manifest().copy()
1002 m1 = p1.manifest().copy()
1002 m2 = p2.manifest()
1003 m2 = p2.manifest()
1003 user = ctx.user()
1004 user = ctx.user()
1004
1005
1005 lock = self.lock()
1006 lock = self.lock()
1006 try:
1007 try:
1007 tr = self.transaction("commit")
1008 tr = self.transaction("commit")
1008 trp = weakref.proxy(tr)
1009 trp = weakref.proxy(tr)
1009
1010
1010 # check in files
1011 # check in files
1011 new = {}
1012 new = {}
1012 changed = []
1013 changed = []
1013 linkrev = len(self)
1014 linkrev = len(self)
1014 for f in sorted(ctx.modified() + ctx.added()):
1015 for f in sorted(ctx.modified() + ctx.added()):
1015 self.ui.note(f + "\n")
1016 self.ui.note(f + "\n")
1016 try:
1017 try:
1017 fctx = ctx[f]
1018 fctx = ctx[f]
1018 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1019 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1019 changed)
1020 changed)
1020 m1.set(f, fctx.flags())
1021 m1.set(f, fctx.flags())
1021 except OSError, inst:
1022 except OSError, inst:
1022 self.ui.warn(_("trouble committing %s!\n") % f)
1023 self.ui.warn(_("trouble committing %s!\n") % f)
1023 raise
1024 raise
1024 except IOError, inst:
1025 except IOError, inst:
1025 errcode = getattr(inst, 'errno', errno.ENOENT)
1026 errcode = getattr(inst, 'errno', errno.ENOENT)
1026 if error or errcode and errcode != errno.ENOENT:
1027 if error or errcode and errcode != errno.ENOENT:
1027 self.ui.warn(_("trouble committing %s!\n") % f)
1028 self.ui.warn(_("trouble committing %s!\n") % f)
1028 raise
1029 raise
1029 else:
1030 else:
1030 removed.append(f)
1031 removed.append(f)
1031
1032
1032 # update manifest
1033 # update manifest
1033 m1.update(new)
1034 m1.update(new)
1034 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1035 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1035 drop = [f for f in removed if f in m1]
1036 drop = [f for f in removed if f in m1]
1036 for f in drop:
1037 for f in drop:
1037 del m1[f]
1038 del m1[f]
1038 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1039 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1039 p2.manifestnode(), (new, drop))
1040 p2.manifestnode(), (new, drop))
1040
1041
1041 # update changelog
1042 # update changelog
1042 self.changelog.delayupdate()
1043 self.changelog.delayupdate()
1043 n = self.changelog.add(mn, changed + removed, ctx.description(),
1044 n = self.changelog.add(mn, changed + removed, ctx.description(),
1044 trp, p1.node(), p2.node(),
1045 trp, p1.node(), p2.node(),
1045 user, ctx.date(), ctx.extra().copy())
1046 user, ctx.date(), ctx.extra().copy())
1046 p = lambda: self.changelog.writepending() and self.root or ""
1047 p = lambda: self.changelog.writepending() and self.root or ""
1047 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1048 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1048 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1049 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1049 parent2=xp2, pending=p)
1050 parent2=xp2, pending=p)
1050 self.changelog.finalize(trp)
1051 self.changelog.finalize(trp)
1051 tr.close()
1052 tr.close()
1052
1053
1053 if self._branchcache:
1054 if self._branchcache:
1054 self.updatebranchcache()
1055 self.updatebranchcache()
1055 return n
1056 return n
1056 finally:
1057 finally:
1057 if tr:
1058 if tr:
1058 tr.release()
1059 tr.release()
1059 lock.release()
1060 lock.release()
1060
1061
1061 def destroyed(self):
1062 def destroyed(self):
1062 '''Inform the repository that nodes have been destroyed.
1063 '''Inform the repository that nodes have been destroyed.
1063 Intended for use by strip and rollback, so there's a common
1064 Intended for use by strip and rollback, so there's a common
1064 place for anything that has to be done after destroying history.'''
1065 place for anything that has to be done after destroying history.'''
1065 # XXX it might be nice if we could take the list of destroyed
1066 # XXX it might be nice if we could take the list of destroyed
1066 # nodes, but I don't see an easy way for rollback() to do that
1067 # nodes, but I don't see an easy way for rollback() to do that
1067
1068
1068 # Ensure the persistent tag cache is updated. Doing it now
1069 # Ensure the persistent tag cache is updated. Doing it now
1069 # means that the tag cache only has to worry about destroyed
1070 # means that the tag cache only has to worry about destroyed
1070 # heads immediately after a strip/rollback. That in turn
1071 # heads immediately after a strip/rollback. That in turn
1071 # guarantees that "cachetip == currenttip" (comparing both rev
1072 # guarantees that "cachetip == currenttip" (comparing both rev
1072 # and node) always means no nodes have been added or destroyed.
1073 # and node) always means no nodes have been added or destroyed.
1073
1074
1074 # XXX this is suboptimal when qrefresh'ing: we strip the current
1075 # XXX this is suboptimal when qrefresh'ing: we strip the current
1075 # head, refresh the tag cache, then immediately add a new head.
1076 # head, refresh the tag cache, then immediately add a new head.
1076 # But I think doing it this way is necessary for the "instant
1077 # But I think doing it this way is necessary for the "instant
1077 # tag cache retrieval" case to work.
1078 # tag cache retrieval" case to work.
1078 self.invalidatecaches()
1079 self.invalidatecaches()
1079
1080
1080 def walk(self, match, node=None):
1081 def walk(self, match, node=None):
'''
walk recursively through the directory tree or a given
changeset, finding all files matched by the match
function
'''
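# Illustrative only: walking every Python file in the working directory
# with a glob matcher might look like (pattern syntax assumed):
#
#   m = matchmod.match(self.root, self.getcwd(), ['glob:**.py'])
#   for f in self.walk(m):
#       self.ui.write(f + '\n')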
1086 return self[node].walk(match)
1087 return self[node].walk(match)
1087
1088
1088 def status(self, node1='.', node2=None, match=None,
1089 def status(self, node1='.', node2=None, match=None,
1089 ignored=False, clean=False, unknown=False,
1090 ignored=False, clean=False, unknown=False,
1090 listsubrepos=False):
1091 listsubrepos=False):
1091 """return status of files between two nodes or node and working directory
1092 """return status of files between two nodes or node and working directory
1092
1093
1093 If node1 is None, use the first dirstate parent instead.
1094 If node1 is None, use the first dirstate parent instead.
1094 If node2 is None, compare node1 with working directory.
1095 If node2 is None, compare node1 with working directory.
1095 """
1096 """
1096
1097
1097 def mfmatches(ctx):
1098 def mfmatches(ctx):
1098 mf = ctx.manifest().copy()
1099 mf = ctx.manifest().copy()
1099 for fn in mf.keys():
1100 for fn in mf.keys():
1100 if not match(fn):
1101 if not match(fn):
1101 del mf[fn]
1102 del mf[fn]
1102 return mf
1103 return mf
1103
1104
1104 if isinstance(node1, context.changectx):
1105 if isinstance(node1, context.changectx):
1105 ctx1 = node1
1106 ctx1 = node1
1106 else:
1107 else:
1107 ctx1 = self[node1]
1108 ctx1 = self[node1]
1108 if isinstance(node2, context.changectx):
1109 if isinstance(node2, context.changectx):
1109 ctx2 = node2
1110 ctx2 = node2
1110 else:
1111 else:
1111 ctx2 = self[node2]
1112 ctx2 = self[node2]
1112
1113
1113 working = ctx2.rev() is None
1114 working = ctx2.rev() is None
1114 parentworking = working and ctx1 == self['.']
1115 parentworking = working and ctx1 == self['.']
1115 match = match or matchmod.always(self.root, self.getcwd())
1116 match = match or matchmod.always(self.root, self.getcwd())
1116 listignored, listclean, listunknown = ignored, clean, unknown
1117 listignored, listclean, listunknown = ignored, clean, unknown
1117
1118
1118 # load earliest manifest first for caching reasons
1119 # load earliest manifest first for caching reasons
1119 if not working and ctx2.rev() < ctx1.rev():
1120 if not working and ctx2.rev() < ctx1.rev():
1120 ctx2.manifest()
1121 ctx2.manifest()
1121
1122
1122 if not parentworking:
1123 if not parentworking:
1123 def bad(f, msg):
1124 def bad(f, msg):
1124 if f not in ctx1:
1125 if f not in ctx1:
1125 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1126 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1126 match.bad = bad
1127 match.bad = bad
1127
1128
1128 if working: # we need to scan the working dir
1129 if working: # we need to scan the working dir
1129 subrepos = []
1130 subrepos = []
1130 if '.hgsub' in self.dirstate:
1131 if '.hgsub' in self.dirstate:
1131 subrepos = ctx1.substate.keys()
1132 subrepos = ctx1.substate.keys()
1132 s = self.dirstate.status(match, subrepos, listignored,
1133 s = self.dirstate.status(match, subrepos, listignored,
1133 listclean, listunknown)
1134 listclean, listunknown)
1134 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1135 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1135
1136
1136 # check for any possibly clean files
1137 # check for any possibly clean files
1137 if parentworking and cmp:
1138 if parentworking and cmp:
1138 fixup = []
1139 fixup = []
1139 # do a full compare of any files that might have changed
1140 # do a full compare of any files that might have changed
1140 for f in sorted(cmp):
1141 for f in sorted(cmp):
1141 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1142 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1142 or ctx1[f].cmp(ctx2[f])):
1143 or ctx1[f].cmp(ctx2[f])):
1143 modified.append(f)
1144 modified.append(f)
1144 else:
1145 else:
1145 fixup.append(f)
1146 fixup.append(f)
1146
1147
1147 # update dirstate for files that are actually clean
1148 # update dirstate for files that are actually clean
1148 if fixup:
1149 if fixup:
1149 if listclean:
1150 if listclean:
1150 clean += fixup
1151 clean += fixup
1151
1152
1152 try:
1153 try:
1153 # updating the dirstate is optional
1154 # updating the dirstate is optional
1154 # so we don't wait on the lock
1155 # so we don't wait on the lock
1155 wlock = self.wlock(False)
1156 wlock = self.wlock(False)
1156 try:
1157 try:
1157 for f in fixup:
1158 for f in fixup:
1158 self.dirstate.normal(f)
1159 self.dirstate.normal(f)
1159 finally:
1160 finally:
1160 wlock.release()
1161 wlock.release()
1161 except error.LockError:
1162 except error.LockError:
1162 pass
1163 pass
1163
1164
1164 if not parentworking:
1165 if not parentworking:
1165 mf1 = mfmatches(ctx1)
1166 mf1 = mfmatches(ctx1)
1166 if working:
1167 if working:
1167 # we are comparing working dir against non-parent
1168 # we are comparing working dir against non-parent
1168 # generate a pseudo-manifest for the working dir
1169 # generate a pseudo-manifest for the working dir
1169 mf2 = mfmatches(self['.'])
1170 mf2 = mfmatches(self['.'])
1170 for f in cmp + modified + added:
1171 for f in cmp + modified + added:
1171 mf2[f] = None
1172 mf2[f] = None
1172 mf2.set(f, ctx2.flags(f))
1173 mf2.set(f, ctx2.flags(f))
1173 for f in removed:
1174 for f in removed:
1174 if f in mf2:
1175 if f in mf2:
1175 del mf2[f]
1176 del mf2[f]
1176 else:
1177 else:
1177 # we are comparing two revisions
1178 # we are comparing two revisions
1178 deleted, unknown, ignored = [], [], []
1179 deleted, unknown, ignored = [], [], []
1179 mf2 = mfmatches(ctx2)
1180 mf2 = mfmatches(ctx2)
1180
1181
1181 modified, added, clean = [], [], []
1182 modified, added, clean = [], [], []
1182 for fn in mf2:
1183 for fn in mf2:
1183 if fn in mf1:
1184 if fn in mf1:
1184 if (mf1.flags(fn) != mf2.flags(fn) or
1185 if (mf1.flags(fn) != mf2.flags(fn) or
1185 (mf1[fn] != mf2[fn] and
1186 (mf1[fn] != mf2[fn] and
1186 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1187 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1187 modified.append(fn)
1188 modified.append(fn)
1188 elif listclean:
1189 elif listclean:
1189 clean.append(fn)
1190 clean.append(fn)
1190 del mf1[fn]
1191 del mf1[fn]
1191 else:
1192 else:
1192 added.append(fn)
1193 added.append(fn)
1193 removed = mf1.keys()
1194 removed = mf1.keys()
1194
1195
1195 r = modified, added, removed, deleted, unknown, ignored, clean
1196 r = modified, added, removed, deleted, unknown, ignored, clean
1196
1197
1197 if listsubrepos:
1198 if listsubrepos:
1198 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1199 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1199 if working:
1200 if working:
1200 rev2 = None
1201 rev2 = None
1201 else:
1202 else:
1202 rev2 = ctx2.substate[subpath][1]
1203 rev2 = ctx2.substate[subpath][1]
1203 try:
1204 try:
1204 submatch = matchmod.narrowmatcher(subpath, match)
1205 submatch = matchmod.narrowmatcher(subpath, match)
1205 s = sub.status(rev2, match=submatch, ignored=listignored,
1206 s = sub.status(rev2, match=submatch, ignored=listignored,
1206 clean=listclean, unknown=listunknown,
1207 clean=listclean, unknown=listunknown,
1207 listsubrepos=True)
1208 listsubrepos=True)
1208 for rfiles, sfiles in zip(r, s):
1209 for rfiles, sfiles in zip(r, s):
1209 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1210 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1210 except error.LookupError:
1211 except error.LookupError:
1211 self.ui.status(_("skipping missing subrepository: %s\n")
1212 self.ui.status(_("skipping missing subrepository: %s\n")
1212 % subpath)
1213 % subpath)
1213
1214
1214 [l.sort() for l in r]
1215 [l.sort() for l in r]
1215 return r
1216 return r
1216
1217
1217 def heads(self, start=None):
1218 def heads(self, start=None):
1218 heads = self.changelog.heads(start)
1219 heads = self.changelog.heads(start)
1219 # sort the output in rev descending order
1220 # sort the output in rev descending order
1220 return sorted(heads, key=self.changelog.rev, reverse=True)
1221 return sorted(heads, key=self.changelog.rev, reverse=True)
1221
1222
1222 def branchheads(self, branch=None, start=None, closed=False):
1223 def branchheads(self, branch=None, start=None, closed=False):
'''return a (possibly filtered) list of heads for the given branch

Heads are returned in topological order, from newest to oldest.
If branch is None, use the dirstate branch.
If start is not None, return only heads reachable from start.
If closed is True, return heads that are marked as closed as well.
'''
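# Illustrative only: since heads come back newest-first, the newest head of
# a named branch can be picked out as
#
#   heads = repo.branchheads('default')
#   newest = heads and heads[0] or None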
1230 if branch is None:
1231 if branch is None:
1231 branch = self[None].branch()
1232 branch = self[None].branch()
1232 branches = self.branchmap()
1233 branches = self.branchmap()
1233 if branch not in branches:
1234 if branch not in branches:
1234 return []
1235 return []
1235 # the cache returns heads ordered lowest to highest
1236 # the cache returns heads ordered lowest to highest
1236 bheads = list(reversed(branches[branch]))
1237 bheads = list(reversed(branches[branch]))
1237 if start is not None:
1238 if start is not None:
1238 # filter out the heads that cannot be reached from startrev
1239 # filter out the heads that cannot be reached from startrev
1239 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1240 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1240 bheads = [h for h in bheads if h in fbheads]
1241 bheads = [h for h in bheads if h in fbheads]
1241 if not closed:
1242 if not closed:
1242 bheads = [h for h in bheads if
1243 bheads = [h for h in bheads if
1243 ('close' not in self.changelog.read(h)[5])]
1244 ('close' not in self.changelog.read(h)[5])]
1244 return bheads
1245 return bheads
1245
1246
1246 def branches(self, nodes):
1247 def branches(self, nodes):
1247 if not nodes:
1248 if not nodes:
1248 nodes = [self.changelog.tip()]
1249 nodes = [self.changelog.tip()]
1249 b = []
1250 b = []
1250 for n in nodes:
1251 for n in nodes:
1251 t = n
1252 t = n
1252 while 1:
1253 while 1:
1253 p = self.changelog.parents(n)
1254 p = self.changelog.parents(n)
1254 if p[1] != nullid or p[0] == nullid:
1255 if p[1] != nullid or p[0] == nullid:
1255 b.append((t, n, p[0], p[1]))
1256 b.append((t, n, p[0], p[1]))
1256 break
1257 break
1257 n = p[0]
1258 n = p[0]
1258 return b
1259 return b
1259
1260
1260 def between(self, pairs):
1261 def between(self, pairs):
1261 r = []
1262 r = []
1262
1263
1263 for top, bottom in pairs:
1264 for top, bottom in pairs:
1264 n, l, i = top, [], 0
1265 n, l, i = top, [], 0
1265 f = 1
1266 f = 1
1266
1267
1267 while n != bottom and n != nullid:
1268 while n != bottom and n != nullid:
1268 p = self.changelog.parents(n)[0]
1269 p = self.changelog.parents(n)[0]
1269 if i == f:
1270 if i == f:
1270 l.append(n)
1271 l.append(n)
1271 f = f * 2
1272 f = f * 2
1272 n = p
1273 n = p
1273 i += 1
1274 i += 1
1274
1275
1275 r.append(l)
1276 r.append(l)
1276
1277
1277 return r
1278 return r
1278
1279
1279 def pull(self, remote, heads=None, force=False):
1280 def pull(self, remote, heads=None, force=False):
1280 lock = self.lock()
1281 lock = self.lock()
1281 try:
1282 try:
1282 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1283 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1283 force=force)
1284 force=force)
1284 common, fetch, rheads = tmp
1285 common, fetch, rheads = tmp
1285 if not fetch:
1286 if not fetch:
1286 self.ui.status(_("no changes found\n"))
1287 self.ui.status(_("no changes found\n"))
1287 return 0
1288 return 0
1288
1289
1289 if heads is None and fetch == [nullid]:
1290 if heads is None and fetch == [nullid]:
1290 self.ui.status(_("requesting all changes\n"))
1291 self.ui.status(_("requesting all changes\n"))
1291 elif heads is None and remote.capable('changegroupsubset'):
1292 elif heads is None and remote.capable('changegroupsubset'):
1292 # issue1320, avoid a race if remote changed after discovery
1293 # issue1320, avoid a race if remote changed after discovery
1293 heads = rheads
1294 heads = rheads
1294
1295
1295 if heads is None:
1296 if heads is None:
1296 cg = remote.changegroup(fetch, 'pull')
1297 cg = remote.changegroup(fetch, 'pull')
1297 else:
1298 else:
1298 if not remote.capable('changegroupsubset'):
1299 if not remote.capable('changegroupsubset'):
1299 raise util.Abort(_("partial pull cannot be done because "
1300 raise util.Abort(_("partial pull cannot be done because "
1300 "other repository doesn't support "
1301 "other repository doesn't support "
1301 "changegroupsubset."))
1302 "changegroupsubset."))
1302 cg = remote.changegroupsubset(fetch, heads, 'pull')
1303 cg = remote.changegroupsubset(fetch, heads, 'pull')
1303 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1304 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1304 finally:
1305 finally:
1305 lock.release()
1306 lock.release()
1306
1307
1307 def push(self, remote, force=False, revs=None, newbranch=False):
1308 def push(self, remote, force=False, revs=None, newbranch=False):
'''Push outgoing changesets (limited by revs) from the current
repository to remote. Return an integer:
- 0 means HTTP error *or* nothing to push
- 1 means we pushed and remote head count is unchanged *or*
we have outgoing changesets but refused to push
- other values as described by addchangegroup()
'''
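# Illustrative only (hypothetical URL; imports as in the commit() sketch
# above): a caller interprets the integer per the table above, e.g.
#
#   other = hg.repository(uimod.ui(), 'http://example.com/hg/repo')
#   ret = repo.push(other, newbranch=True)
#   # ret == 0 -> error or nothing to push; ret == 1 -> pushed with the
#   # remote head count unchanged, or the push was refused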
1315 # there are two ways to push to remote repo:
1316 # there are two ways to push to remote repo:
1316 #
1317 #
1317 # addchangegroup assumes local user can lock remote
1318 # addchangegroup assumes local user can lock remote
1318 # repo (local filesystem, old ssh servers).
1319 # repo (local filesystem, old ssh servers).
1319 #
1320 #
1320 # unbundle assumes local user cannot lock remote repo (new ssh
1321 # unbundle assumes local user cannot lock remote repo (new ssh
1321 # servers, http servers).
1322 # servers, http servers).
1322
1323
1323 lock = None
1324 lock = None
1324 unbundle = remote.capable('unbundle')
1325 unbundle = remote.capable('unbundle')
1325 if not unbundle:
1326 if not unbundle:
1326 lock = remote.lock()
1327 lock = remote.lock()
1327 try:
1328 try:
1328 ret = discovery.prepush(self, remote, force, revs, newbranch)
1329 ret = discovery.prepush(self, remote, force, revs, newbranch)
1329 if ret[0] is None:
1330 if ret[0] is None:
1330 # and here we return 0 for "nothing to push" or 1 for
1331 # and here we return 0 for "nothing to push" or 1 for
1331 # "something to push but I refuse"
1332 # "something to push but I refuse"
1332 return ret[1]
1333 return ret[1]
1333
1334
1334 cg, remote_heads = ret
1335 cg, remote_heads = ret
1335 if unbundle:
1336 if unbundle:
# local repo finds heads on server, finds out what revs it must
# push. Once the revs are transferred, if the server finds it has
# different heads (someone else won a commit/push race), the server
# aborts.
1340 if force:
1341 if force:
1341 remote_heads = ['force']
1342 remote_heads = ['force']
1342 # ssh: return remote's addchangegroup()
1343 # ssh: return remote's addchangegroup()
1343 # http: return remote's addchangegroup() or 0 for error
1344 # http: return remote's addchangegroup() or 0 for error
1344 return remote.unbundle(cg, remote_heads, 'push')
1345 return remote.unbundle(cg, remote_heads, 'push')
1345 else:
1346 else:
1346 # we return an integer indicating remote head count change
1347 # we return an integer indicating remote head count change
1347 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1348 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1348 finally:
1349 finally:
1349 if lock is not None:
1350 if lock is not None:
1350 lock.release()
1351 lock.release()
1351
1352
1352 def changegroupinfo(self, nodes, source):
1353 def changegroupinfo(self, nodes, source):
1353 if self.ui.verbose or source == 'bundle':
1354 if self.ui.verbose or source == 'bundle':
1354 self.ui.status(_("%d changesets found\n") % len(nodes))
1355 self.ui.status(_("%d changesets found\n") % len(nodes))
1355 if self.ui.debugflag:
1356 if self.ui.debugflag:
1356 self.ui.debug("list of changesets:\n")
1357 self.ui.debug("list of changesets:\n")
1357 for node in nodes:
1358 for node in nodes:
1358 self.ui.debug("%s\n" % hex(node))
1359 self.ui.debug("%s\n" % hex(node))
1359
1360
1360 def changegroupsubset(self, bases, heads, source, extranodes=None):
1361 def changegroupsubset(self, bases, heads, source, extranodes=None):
1361 """Compute a changegroup consisting of all the nodes that are
1362 """Compute a changegroup consisting of all the nodes that are
1362 descendents of any of the bases and ancestors of any of the heads.
1363 descendents of any of the bases and ancestors of any of the heads.
1363 Return a chunkbuffer object whose read() method will return
1364 Return a chunkbuffer object whose read() method will return
1364 successive changegroup chunks.
1365 successive changegroup chunks.
1365
1366
1366 It is fairly complex as determining which filenodes and which
1367 It is fairly complex as determining which filenodes and which
1367 manifest nodes need to be included for the changeset to be complete
1368 manifest nodes need to be included for the changeset to be complete
1368 is non-trivial.
1369 is non-trivial.
1369
1370
1370 Another wrinkle is doing the reverse, figuring out which changeset in
1371 Another wrinkle is doing the reverse, figuring out which changeset in
1371 the changegroup a particular filenode or manifestnode belongs to.
1372 the changegroup a particular filenode or manifestnode belongs to.
1372
1373
1373 The caller can specify some nodes that must be included in the
1374 The caller can specify some nodes that must be included in the
1374 changegroup using the extranodes argument. It should be a dict
1375 changegroup using the extranodes argument. It should be a dict
1375 where the keys are the filenames (or 1 for the manifest), and the
1376 where the keys are the filenames (or 1 for the manifest), and the
1376 values are lists of (node, linknode) tuples, where node is a wanted
1377 values are lists of (node, linknode) tuples, where node is a wanted
1377 node and linknode is the changelog node that should be transmitted as
1378 node and linknode is the changelog node that should be transmitted as
1378 the linkrev.
1379 the linkrev.
1379 """
1380 """
1380
1381
1381 # Set up some initial variables
1382 # Set up some initial variables
1382 # Make it easy to refer to self.changelog
1383 # Make it easy to refer to self.changelog
1383 cl = self.changelog
1384 cl = self.changelog
1384 # Compute the list of changesets in this changegroup.
1385 # Compute the list of changesets in this changegroup.
1385 # Some bases may turn out to be superfluous, and some heads may be
1386 # Some bases may turn out to be superfluous, and some heads may be
1386 # too. nodesbetween will return the minimal set of bases and heads
1387 # too. nodesbetween will return the minimal set of bases and heads
1387 # necessary to re-create the changegroup.
1388 # necessary to re-create the changegroup.
1388 if not bases:
1389 if not bases:
1389 bases = [nullid]
1390 bases = [nullid]
1390 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1391 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1391
1392
1392 if extranodes is None:
1393 if extranodes is None:
1393 # can we go through the fast path ?
1394 # can we go through the fast path ?
1394 heads.sort()
1395 heads.sort()
1395 allheads = self.heads()
1396 allheads = self.heads()
1396 allheads.sort()
1397 allheads.sort()
1397 if heads == allheads:
1398 if heads == allheads:
1398 return self._changegroup(msng_cl_lst, source)
1399 return self._changegroup(msng_cl_lst, source)
1399
1400
1400 # slow path
1401 # slow path
1401 self.hook('preoutgoing', throw=True, source=source)
1402 self.hook('preoutgoing', throw=True, source=source)
1402
1403
1403 self.changegroupinfo(msng_cl_lst, source)
1404 self.changegroupinfo(msng_cl_lst, source)
1404
1405
1405 # We assume that all ancestors of bases are known
1406 # We assume that all ancestors of bases are known
1406 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1407 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1407
1408
1408 # Make it easy to refer to self.manifest
1409 # Make it easy to refer to self.manifest
1409 mnfst = self.manifest
1410 mnfst = self.manifest
1410 # We don't know which manifests are missing yet
1411 # We don't know which manifests are missing yet
1411 msng_mnfst_set = {}
1412 msng_mnfst_set = {}
1412 # Nor do we know which filenodes are missing.
1413 # Nor do we know which filenodes are missing.
1413 msng_filenode_set = {}
1414 msng_filenode_set = {}
1414
1415
1415 # A changeset always belongs to itself, so the changenode lookup
1416 # A changeset always belongs to itself, so the changenode lookup
1416 # function for a changenode is identity.
1417 # function for a changenode is identity.
1417 def identity(x):
1418 def identity(x):
1418 return x
1419 return x
1419
1420
# A function-generating function that sets up the initial environment
# for the inner function.
1422 def filenode_collector(changedfiles):
1423 def filenode_collector(changedfiles):
1423 # This gathers information from each manifestnode included in the
1424 # This gathers information from each manifestnode included in the
1424 # changegroup about which filenodes the manifest node references
1425 # changegroup about which filenodes the manifest node references
1425 # so we can include those in the changegroup too.
1426 # so we can include those in the changegroup too.
1426 #
1427 #
# It also remembers which changenode each filenode belongs to. It
# does this by assuming that a filenode belongs to the changenode
# that the first manifest referencing it belongs to.
1430 def collect_msng_filenodes(mnfstnode):
1431 def collect_msng_filenodes(mnfstnode):
1431 r = mnfst.rev(mnfstnode)
1432 r = mnfst.rev(mnfstnode)
1432 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1433 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1433 # If the previous rev is one of the parents,
1434 # If the previous rev is one of the parents,
1434 # we only need to see a diff.
1435 # we only need to see a diff.
1435 deltamf = mnfst.readdelta(mnfstnode)
1436 deltamf = mnfst.readdelta(mnfstnode)
1436 # For each line in the delta
1437 # For each line in the delta
1437 for f, fnode in deltamf.iteritems():
1438 for f, fnode in deltamf.iteritems():
1438 # And if the file is in the list of files we care
1439 # And if the file is in the list of files we care
1439 # about.
1440 # about.
1440 if f in changedfiles:
1441 if f in changedfiles:
1441 # Get the changenode this manifest belongs to
1442 # Get the changenode this manifest belongs to
1442 clnode = msng_mnfst_set[mnfstnode]
1443 clnode = msng_mnfst_set[mnfstnode]
1443 # Create the set of filenodes for the file if
1444 # Create the set of filenodes for the file if
1444 # there isn't one already.
1445 # there isn't one already.
1445 ndset = msng_filenode_set.setdefault(f, {})
1446 ndset = msng_filenode_set.setdefault(f, {})
1446 # And set the filenode's changelog node to the
1447 # And set the filenode's changelog node to the
1447 # manifest's if it hasn't been set already.
1448 # manifest's if it hasn't been set already.
1448 ndset.setdefault(fnode, clnode)
1449 ndset.setdefault(fnode, clnode)
1449 else:
1450 else:
1450 # Otherwise we need a full manifest.
1451 # Otherwise we need a full manifest.
1451 m = mnfst.read(mnfstnode)
1452 m = mnfst.read(mnfstnode)
# For every file we care about.
1453 for f in changedfiles:
1454 for f in changedfiles:
1454 fnode = m.get(f, None)
1455 fnode = m.get(f, None)
1455 # If it's in the manifest
1456 # If it's in the manifest
1456 if fnode is not None:
1457 if fnode is not None:
1457 # See comments above.
1458 # See comments above.
1458 clnode = msng_mnfst_set[mnfstnode]
1459 clnode = msng_mnfst_set[mnfstnode]
1459 ndset = msng_filenode_set.setdefault(f, {})
1460 ndset = msng_filenode_set.setdefault(f, {})
1460 ndset.setdefault(fnode, clnode)
1461 ndset.setdefault(fnode, clnode)
1461 return collect_msng_filenodes
1462 return collect_msng_filenodes
1462
1463
1463 # If we determine that a particular file or manifest node must be a
1464 # If we determine that a particular file or manifest node must be a
1464 # node that the recipient of the changegroup will already have, we can
1465 # node that the recipient of the changegroup will already have, we can
1465 # also assume the recipient will have all the parents. This function
1466 # also assume the recipient will have all the parents. This function
1466 # prunes them from the set of missing nodes.
1467 # prunes them from the set of missing nodes.
1467 def prune(revlog, missingnodes):
1468 def prune(revlog, missingnodes):
1468 hasset = set()
1469 hasset = set()
1469 # If a 'missing' filenode thinks it belongs to a changenode we
1470 # If a 'missing' filenode thinks it belongs to a changenode we
1470 # assume the recipient must have, then the recipient must have
1471 # assume the recipient must have, then the recipient must have
1471 # that filenode.
1472 # that filenode.
1472 for n in missingnodes:
1473 for n in missingnodes:
1473 clrev = revlog.linkrev(revlog.rev(n))
1474 clrev = revlog.linkrev(revlog.rev(n))
1474 if clrev in commonrevs:
1475 if clrev in commonrevs:
1475 hasset.add(n)
1476 hasset.add(n)
1476 for n in hasset:
1477 for n in hasset:
1477 missingnodes.pop(n, None)
1478 missingnodes.pop(n, None)
1478 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1479 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1479 missingnodes.pop(revlog.node(r), None)
1480 missingnodes.pop(revlog.node(r), None)
1480
1481
1481 # Add the nodes that were explicitly requested.
1482 # Add the nodes that were explicitly requested.
1482 def add_extra_nodes(name, nodes):
1483 def add_extra_nodes(name, nodes):
1483 if not extranodes or name not in extranodes:
1484 if not extranodes or name not in extranodes:
1484 return
1485 return
1485
1486
1486 for node, linknode in extranodes[name]:
1487 for node, linknode in extranodes[name]:
1487 if node not in nodes:
1488 if node not in nodes:
1488 nodes[node] = linknode
1489 nodes[node] = linknode
1489
1490
# Now that we have all these utility functions to help out and
# logically divide up the task, generate the group.
1492 def gengroup():
1493 def gengroup():
1493 # The set of changed files starts empty.
1494 # The set of changed files starts empty.
1494 changedfiles = set()
1495 changedfiles = set()
1495 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1496 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1496
1497
1497 # Create a changenode group generator that will call our functions
1498 # Create a changenode group generator that will call our functions
1498 # back to lookup the owning changenode and collect information.
1499 # back to lookup the owning changenode and collect information.
1499 group = cl.group(msng_cl_lst, identity, collect)
1500 group = cl.group(msng_cl_lst, identity, collect)
1500 for cnt, chnk in enumerate(group):
1501 for cnt, chnk in enumerate(group):
1501 yield chnk
1502 yield chnk
1502 # revlog.group yields three entries per node, so
1503 # revlog.group yields three entries per node, so
1503 # dividing by 3 gives an approximation of how many
1504 # dividing by 3 gives an approximation of how many
1504 # nodes have been processed.
1505 # nodes have been processed.
1505 self.ui.progress(_('bundling'), cnt / 3,
1506 self.ui.progress(_('bundling'), cnt / 3,
1506 unit=_('changesets'))
1507 unit=_('changesets'))
1507 changecount = cnt / 3
1508 changecount = cnt / 3
1508 self.ui.progress(_('bundling'), None)
1509 self.ui.progress(_('bundling'), None)
1509
1510
1510 prune(mnfst, msng_mnfst_set)
1511 prune(mnfst, msng_mnfst_set)
1511 add_extra_nodes(1, msng_mnfst_set)
1512 add_extra_nodes(1, msng_mnfst_set)
1512 msng_mnfst_lst = msng_mnfst_set.keys()
1513 msng_mnfst_lst = msng_mnfst_set.keys()
1513 # Sort the manifestnodes by revision number.
1514 # Sort the manifestnodes by revision number.
1514 msng_mnfst_lst.sort(key=mnfst.rev)
1515 msng_mnfst_lst.sort(key=mnfst.rev)
1515 # Create a generator for the manifestnodes that calls our lookup
1516 # Create a generator for the manifestnodes that calls our lookup
1516 # and data collection functions back.
1517 # and data collection functions back.
1517 group = mnfst.group(msng_mnfst_lst,
1518 group = mnfst.group(msng_mnfst_lst,
1518 lambda mnode: msng_mnfst_set[mnode],
1519 lambda mnode: msng_mnfst_set[mnode],
1519 filenode_collector(changedfiles))
1520 filenode_collector(changedfiles))
1520 efiles = {}
1521 efiles = {}
1521 for cnt, chnk in enumerate(group):
1522 for cnt, chnk in enumerate(group):
1522 if cnt % 3 == 1:
1523 if cnt % 3 == 1:
1523 mnode = chnk[:20]
1524 mnode = chnk[:20]
1524 efiles.update(mnfst.readdelta(mnode))
1525 efiles.update(mnfst.readdelta(mnode))
1525 yield chnk
1526 yield chnk
1526 # see above comment for why we divide by 3
1527 # see above comment for why we divide by 3
1527 self.ui.progress(_('bundling'), cnt / 3,
1528 self.ui.progress(_('bundling'), cnt / 3,
1528 unit=_('manifests'), total=changecount)
1529 unit=_('manifests'), total=changecount)
1529 self.ui.progress(_('bundling'), None)
1530 self.ui.progress(_('bundling'), None)
1530 efiles = len(efiles)
1531 efiles = len(efiles)
1531
1532
# These are no longer needed; dereference and toss the memory for
# them.
1534 msng_mnfst_lst = None
1535 msng_mnfst_lst = None
1535 msng_mnfst_set.clear()
1536 msng_mnfst_set.clear()
1536
1537
1537 if extranodes:
1538 if extranodes:
1538 for fname in extranodes:
1539 for fname in extranodes:
1539 if isinstance(fname, int):
1540 if isinstance(fname, int):
1540 continue
1541 continue
1541 msng_filenode_set.setdefault(fname, {})
1542 msng_filenode_set.setdefault(fname, {})
1542 changedfiles.add(fname)
1543 changedfiles.add(fname)
1543 # Go through all our files in order sorted by name.
1544 # Go through all our files in order sorted by name.
1544 for idx, fname in enumerate(sorted(changedfiles)):
1545 for idx, fname in enumerate(sorted(changedfiles)):
1545 filerevlog = self.file(fname)
1546 filerevlog = self.file(fname)
1546 if not len(filerevlog):
1547 if not len(filerevlog):
1547 raise util.Abort(_("empty or missing revlog for %s") % fname)
1548 raise util.Abort(_("empty or missing revlog for %s") % fname)
1548 # Toss out the filenodes that the recipient isn't really
1549 # Toss out the filenodes that the recipient isn't really
1549 # missing.
1550 # missing.
1550 missingfnodes = msng_filenode_set.pop(fname, {})
1551 missingfnodes = msng_filenode_set.pop(fname, {})
1551 prune(filerevlog, missingfnodes)
1552 prune(filerevlog, missingfnodes)
1552 add_extra_nodes(fname, missingfnodes)
1553 add_extra_nodes(fname, missingfnodes)
1553 # If any filenodes are left, generate the group for them,
1554 # If any filenodes are left, generate the group for them,
1554 # otherwise don't bother.
1555 # otherwise don't bother.
1555 if missingfnodes:
1556 if missingfnodes:
1556 yield changegroup.chunkheader(len(fname))
1557 yield changegroup.chunkheader(len(fname))
1557 yield fname
1558 yield fname
1558 # Sort the filenodes by their revision # (topological order)
1559 # Sort the filenodes by their revision # (topological order)
1559 nodeiter = list(missingfnodes)
1560 nodeiter = list(missingfnodes)
1560 nodeiter.sort(key=filerevlog.rev)
1561 nodeiter.sort(key=filerevlog.rev)
1561 # Create a group generator and only pass in a changenode
1562 # Create a group generator and only pass in a changenode
1562 # lookup function as we need to collect no information
1563 # lookup function as we need to collect no information
1563 # from filenodes.
1564 # from filenodes.
1564 group = filerevlog.group(nodeiter,
1565 group = filerevlog.group(nodeiter,
1565 lambda fnode: missingfnodes[fnode])
1566 lambda fnode: missingfnodes[fnode])
1566 for chnk in group:
1567 for chnk in group:
1567 # even though we print the same progress on
1568 # even though we print the same progress on
1568 # most loop iterations, put the progress call
1569 # most loop iterations, put the progress call
1569 # here so that time estimates (if any) can be updated
1570 # here so that time estimates (if any) can be updated
1570 self.ui.progress(
1571 self.ui.progress(
1571 _('bundling'), idx, item=fname,
1572 _('bundling'), idx, item=fname,
1572 unit=_('files'), total=efiles)
1573 unit=_('files'), total=efiles)
1573 yield chnk
1574 yield chnk
1574 # Signal that no more groups are left.
1575 # Signal that no more groups are left.
1575 yield changegroup.closechunk()
1576 yield changegroup.closechunk()
1576 self.ui.progress(_('bundling'), None)
1577 self.ui.progress(_('bundling'), None)
1577
1578
1578 if msng_cl_lst:
1579 if msng_cl_lst:
1579 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1580 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1580
1581
1581 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1582 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1582
1583
1583 def changegroup(self, basenodes, source):
1584 def changegroup(self, basenodes, source):
1584 # to avoid a race we use changegroupsubset() (issue1320)
1585 # to avoid a race we use changegroupsubset() (issue1320)
1585 return self.changegroupsubset(basenodes, self.heads(), source)
1586 return self.changegroupsubset(basenodes, self.heads(), source)
1586
1587
1587 def _changegroup(self, nodes, source):
1588 def _changegroup(self, nodes, source):
1588 """Compute the changegroup of all nodes that we have that a recipient
1589 """Compute the changegroup of all nodes that we have that a recipient
1589 doesn't. Return a chunkbuffer object whose read() method will return
1590 doesn't. Return a chunkbuffer object whose read() method will return
1590 successive changegroup chunks.
1591 successive changegroup chunks.
1591
1592
1592 This is much easier than the previous function as we can assume that
1593 This is much easier than the previous function as we can assume that
1593 the recipient has any changenode we aren't sending them.
1594 the recipient has any changenode we aren't sending them.
1594
1595
1595 nodes is the set of nodes to send"""
1596 nodes is the set of nodes to send"""
1596
1597
1597 self.hook('preoutgoing', throw=True, source=source)
1598 self.hook('preoutgoing', throw=True, source=source)
1598
1599
1599 cl = self.changelog
1600 cl = self.changelog
1600 revset = set([cl.rev(n) for n in nodes])
1601 revset = set([cl.rev(n) for n in nodes])
1601 self.changegroupinfo(nodes, source)
1602 self.changegroupinfo(nodes, source)
1602
1603
1603 def identity(x):
1604 def identity(x):
1604 return x
1605 return x
1605
1606
1606 def gennodelst(log):
1607 def gennodelst(log):
1607 for r in log:
1608 for r in log:
1608 if log.linkrev(r) in revset:
1609 if log.linkrev(r) in revset:
1609 yield log.node(r)
1610 yield log.node(r)
1610
1611
1611 def lookuplinkrev_func(revlog):
1612 def lookuplinkrev_func(revlog):
1612 def lookuplinkrev(n):
1613 def lookuplinkrev(n):
1613 return cl.node(revlog.linkrev(revlog.rev(n)))
1614 return cl.node(revlog.linkrev(revlog.rev(n)))
1614 return lookuplinkrev
1615 return lookuplinkrev
1615
1616
1616 def gengroup():
1617 def gengroup():
1617 '''yield a sequence of changegroup chunks (strings)'''
1618 '''yield a sequence of changegroup chunks (strings)'''
1618 # construct a list of all changed files
1619 # construct a list of all changed files
1619 changedfiles = set()
1620 changedfiles = set()
1620 mmfs = {}
1621 mmfs = {}
1621 collect = changegroup.collector(cl, mmfs, changedfiles)
1622 collect = changegroup.collector(cl, mmfs, changedfiles)
1622
1623
1623 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1624 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1624 # revlog.group yields three entries per node, so
1625 # revlog.group yields three entries per node, so
1625 # dividing by 3 gives an approximation of how many
1626 # dividing by 3 gives an approximation of how many
1626 # nodes have been processed.
1627 # nodes have been processed.
1627 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1628 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1628 yield chnk
1629 yield chnk
1629 changecount = cnt / 3
1630 changecount = cnt / 3
1630 self.ui.progress(_('bundling'), None)
1631 self.ui.progress(_('bundling'), None)
1631
1632
1632 mnfst = self.manifest
1633 mnfst = self.manifest
1633 nodeiter = gennodelst(mnfst)
1634 nodeiter = gennodelst(mnfst)
1634 efiles = {}
1635 efiles = {}
1635 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1636 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1636 lookuplinkrev_func(mnfst))):
1637 lookuplinkrev_func(mnfst))):
1637 if cnt % 3 == 1:
1638 if cnt % 3 == 1:
1638 mnode = chnk[:20]
1639 mnode = chnk[:20]
1639 efiles.update(mnfst.readdelta(mnode))
1640 efiles.update(mnfst.readdelta(mnode))
1640 # see above comment for why we divide by 3
1641 # see above comment for why we divide by 3
1641 self.ui.progress(_('bundling'), cnt / 3,
1642 self.ui.progress(_('bundling'), cnt / 3,
1642 unit=_('manifests'), total=changecount)
1643 unit=_('manifests'), total=changecount)
1643 yield chnk
1644 yield chnk
1644 efiles = len(efiles)
1645 efiles = len(efiles)
1645 self.ui.progress(_('bundling'), None)
1646 self.ui.progress(_('bundling'), None)
1646
1647
1647 for idx, fname in enumerate(sorted(changedfiles)):
1648 for idx, fname in enumerate(sorted(changedfiles)):
1648 filerevlog = self.file(fname)
1649 filerevlog = self.file(fname)
1649 if not len(filerevlog):
1650 if not len(filerevlog):
1650 raise util.Abort(_("empty or missing revlog for %s") % fname)
1651 raise util.Abort(_("empty or missing revlog for %s") % fname)
1651 nodeiter = gennodelst(filerevlog)
1652 nodeiter = gennodelst(filerevlog)
1652 nodeiter = list(nodeiter)
1653 nodeiter = list(nodeiter)
1653 if nodeiter:
1654 if nodeiter:
1654 yield changegroup.chunkheader(len(fname))
1655 yield changegroup.chunkheader(len(fname))
1655 yield fname
1656 yield fname
1656 lookup = lookuplinkrev_func(filerevlog)
1657 lookup = lookuplinkrev_func(filerevlog)
1657 for chnk in filerevlog.group(nodeiter, lookup):
1658 for chnk in filerevlog.group(nodeiter, lookup):
1658 self.ui.progress(
1659 self.ui.progress(
1659 _('bundling'), idx, item=fname,
1660 _('bundling'), idx, item=fname,
1660 total=efiles, unit=_('files'))
1661 total=efiles, unit=_('files'))
1661 yield chnk
1662 yield chnk
1662 self.ui.progress(_('bundling'), None)
1663 self.ui.progress(_('bundling'), None)
1663
1664
1664 yield changegroup.closechunk()
1665 yield changegroup.closechunk()
1665
1666
1666 if nodes:
1667 if nodes:
1667 self.hook('outgoing', node=hex(nodes[0]), source=source)
1668 self.hook('outgoing', node=hex(nodes[0]), source=source)
1668
1669
1669 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1670 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1670
1671
1671 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1672 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1672 """Add the changegroup returned by source.read() to this repo.
1673 """Add the changegroup returned by source.read() to this repo.
1673 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1674 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1674 the URL of the repo where this changegroup is coming from.
1675 the URL of the repo where this changegroup is coming from.
1675 If lock is not None, the function takes ownership of the lock
1676 If lock is not None, the function takes ownership of the lock
1676 and releases it after the changegroup is added.
1677 and releases it after the changegroup is added.
1677
1678
1678 Return an integer summarizing the change to this repo:
1679 Return an integer summarizing the change to this repo:
1679 - nothing changed or no source: 0
1680 - nothing changed or no source: 0
1680 - more heads than before: 1+added heads (2..n)
1681 - more heads than before: 1+added heads (2..n)
1681 - fewer heads than before: -1-removed heads (-2..-n)
1682 - fewer heads than before: -1-removed heads (-2..-n)
1682 - number of heads stays the same: 1
1683 - number of heads stays the same: 1
1683 """
1684 """
1684 def csmap(x):
1685 def csmap(x):
1685 self.ui.debug("add changeset %s\n" % short(x))
1686 self.ui.debug("add changeset %s\n" % short(x))
1686 return len(cl)
1687 return len(cl)
1687
1688
1688 def revmap(x):
1689 def revmap(x):
1689 return cl.rev(x)
1690 return cl.rev(x)
1690
1691
1691 if not source:
1692 if not source:
1692 return 0
1693 return 0
1693
1694
1694 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1695 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1695
1696
1696 changesets = files = revisions = 0
1697 changesets = files = revisions = 0
1697 efiles = set()
1698 efiles = set()
1698
1699
# write changelog data to temp files so concurrent readers will not see
# an inconsistent view
1701 cl = self.changelog
1702 cl = self.changelog
1702 cl.delayupdate()
1703 cl.delayupdate()
1703 oldheads = len(cl.heads())
1704 oldheads = len(cl.heads())
1704
1705
1705 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1706 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1706 try:
1707 try:
1707 trp = weakref.proxy(tr)
1708 trp = weakref.proxy(tr)
1708 # pull off the changeset group
1709 # pull off the changeset group
1709 self.ui.status(_("adding changesets\n"))
1710 self.ui.status(_("adding changesets\n"))
1710 clstart = len(cl)
1711 clstart = len(cl)
1711 class prog(object):
1712 class prog(object):
1712 step = _('changesets')
1713 step = _('changesets')
1713 count = 1
1714 count = 1
1714 ui = self.ui
1715 ui = self.ui
1715 total = None
1716 total = None
1716 def __call__(self):
1717 def __call__(self):
1717 self.ui.progress(self.step, self.count, unit=_('chunks'),
1718 self.ui.progress(self.step, self.count, unit=_('chunks'),
1718 total=self.total)
1719 total=self.total)
1719 self.count += 1
1720 self.count += 1
1720 pr = prog()
1721 pr = prog()
1721 source.callback = pr
1722 source.callback = pr
1722
1723
1723 if (cl.addgroup(source, csmap, trp) is None
1724 if (cl.addgroup(source, csmap, trp) is None
1724 and not emptyok):
1725 and not emptyok):
1725 raise util.Abort(_("received changelog group is empty"))
1726 raise util.Abort(_("received changelog group is empty"))
1726 clend = len(cl)
1727 clend = len(cl)
1727 changesets = clend - clstart
1728 changesets = clend - clstart
1728 for c in xrange(clstart, clend):
1729 for c in xrange(clstart, clend):
1729 efiles.update(self[c].files())
1730 efiles.update(self[c].files())
1730 efiles = len(efiles)
1731 efiles = len(efiles)
1731 self.ui.progress(_('changesets'), None)
1732 self.ui.progress(_('changesets'), None)
1732
1733
1733 # pull off the manifest group
1734 # pull off the manifest group
1734 self.ui.status(_("adding manifests\n"))
1735 self.ui.status(_("adding manifests\n"))
1735 pr.step = _('manifests')
1736 pr.step = _('manifests')
1736 pr.count = 1
1737 pr.count = 1
1737 pr.total = changesets # manifests <= changesets
1738 pr.total = changesets # manifests <= changesets
1738 # no need to check for empty manifest group here:
1739 # no need to check for empty manifest group here:
1739 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1740 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1740 # no new manifest will be created and the manifest group will
1741 # no new manifest will be created and the manifest group will
1741 # be empty during the pull
1742 # be empty during the pull
1742 self.manifest.addgroup(source, revmap, trp)
1743 self.manifest.addgroup(source, revmap, trp)
1743 self.ui.progress(_('manifests'), None)
1744 self.ui.progress(_('manifests'), None)
1744
1745
1745 needfiles = {}
1746 needfiles = {}
1746 if self.ui.configbool('server', 'validate', default=False):
1747 if self.ui.configbool('server', 'validate', default=False):
1747 # validate incoming csets have their manifests
1748 # validate incoming csets have their manifests
1748 for cset in xrange(clstart, clend):
1749 for cset in xrange(clstart, clend):
1749 mfest = self.changelog.read(self.changelog.node(cset))[0]
1750 mfest = self.changelog.read(self.changelog.node(cset))[0]
1750 mfest = self.manifest.readdelta(mfest)
1751 mfest = self.manifest.readdelta(mfest)
1751 # store file nodes we must see
1752 # store file nodes we must see
1752 for f, n in mfest.iteritems():
1753 for f, n in mfest.iteritems():
1753 needfiles.setdefault(f, set()).add(n)
1754 needfiles.setdefault(f, set()).add(n)
1754
1755
1755 # process the files
1756 # process the files
1756 self.ui.status(_("adding file changes\n"))
1757 self.ui.status(_("adding file changes\n"))
1757 pr.step = 'files'
1758 pr.step = 'files'
1758 pr.count = 1
1759 pr.count = 1
1759 pr.total = efiles
1760 pr.total = efiles
1760 source.callback = None
1761 source.callback = None
1761
1762
1762 while 1:
1763 while 1:
1763 f = source.chunk()
1764 f = source.chunk()
1764 if not f:
1765 if not f:
1765 break
1766 break
1766 self.ui.debug("adding %s revisions\n" % f)
1767 self.ui.debug("adding %s revisions\n" % f)
1767 pr()
1768 pr()
1768 fl = self.file(f)
1769 fl = self.file(f)
1769 o = len(fl)
1770 o = len(fl)
1770 if fl.addgroup(source, revmap, trp) is None:
1771 if fl.addgroup(source, revmap, trp) is None:
1771 raise util.Abort(_("received file revlog group is empty"))
1772 raise util.Abort(_("received file revlog group is empty"))
1772 revisions += len(fl) - o
1773 revisions += len(fl) - o
1773 files += 1
1774 files += 1
1774 if f in needfiles:
1775 if f in needfiles:
1775 needs = needfiles[f]
1776 needs = needfiles[f]
1776 for new in xrange(o, len(fl)):
1777 for new in xrange(o, len(fl)):
1777 n = fl.node(new)
1778 n = fl.node(new)
1778 if n in needs:
1779 if n in needs:
1779 needs.remove(n)
1780 needs.remove(n)
1780 if not needs:
1781 if not needs:
1781 del needfiles[f]
1782 del needfiles[f]
1782 self.ui.progress(_('files'), None)
1783 self.ui.progress(_('files'), None)
1783
1784
1784 for f, needs in needfiles.iteritems():
1785 for f, needs in needfiles.iteritems():
1785 fl = self.file(f)
1786 fl = self.file(f)
1786 for n in needs:
1787 for n in needs:
1787 try:
1788 try:
1788 fl.rev(n)
1789 fl.rev(n)
1789 except error.LookupError:
1790 except error.LookupError:
1790 raise util.Abort(
1791 raise util.Abort(
1791 _('missing file data for %s:%s - run hg verify') %
1792 _('missing file data for %s:%s - run hg verify') %
1792 (f, hex(n)))
1793 (f, hex(n)))
1793
1794
1794 newheads = len(cl.heads())
1795 newheads = len(cl.heads())
1795 heads = ""
1796 heads = ""
1796 if oldheads and newheads != oldheads:
1797 if oldheads and newheads != oldheads:
1797 heads = _(" (%+d heads)") % (newheads - oldheads)
1798 heads = _(" (%+d heads)") % (newheads - oldheads)
1798
1799
1799 self.ui.status(_("added %d changesets"
1800 self.ui.status(_("added %d changesets"
1800 " with %d changes to %d files%s\n")
1801 " with %d changes to %d files%s\n")
1801 % (changesets, revisions, files, heads))
1802 % (changesets, revisions, files, heads))
1802
1803
1803 if changesets > 0:
1804 if changesets > 0:
1804 p = lambda: cl.writepending() and self.root or ""
1805 p = lambda: cl.writepending() and self.root or ""
1805 self.hook('pretxnchangegroup', throw=True,
1806 self.hook('pretxnchangegroup', throw=True,
1806 node=hex(cl.node(clstart)), source=srctype,
1807 node=hex(cl.node(clstart)), source=srctype,
1807 url=url, pending=p)
1808 url=url, pending=p)
1808
1809
1809 # make changelog see real files again
1810 # make changelog see real files again
1810 cl.finalize(trp)
1811 cl.finalize(trp)
1811
1812
1812 tr.close()
1813 tr.close()
1813 finally:
1814 finally:
1814 tr.release()
1815 tr.release()
1815 if lock:
1816 if lock:
1816 lock.release()
1817 lock.release()
1817
1818
1818 if changesets > 0:
1819 if changesets > 0:
1819 # forcefully update the on-disk branch cache
1820 # forcefully update the on-disk branch cache
1820 self.ui.debug("updating the branch cache\n")
1821 self.ui.debug("updating the branch cache\n")
1821 self.updatebranchcache()
1822 self.updatebranchcache()
1822 self.hook("changegroup", node=hex(cl.node(clstart)),
1823 self.hook("changegroup", node=hex(cl.node(clstart)),
1823 source=srctype, url=url)
1824 source=srctype, url=url)
1824
1825
1825 for i in xrange(clstart, clend):
1826 for i in xrange(clstart, clend):
1826 self.hook("incoming", node=hex(cl.node(i)),
1827 self.hook("incoming", node=hex(cl.node(i)),
1827 source=srctype, url=url)
1828 source=srctype, url=url)
1828
1829
1829 # never return 0 here:
1830 # never return 0 here:
1830 if newheads < oldheads:
1831 if newheads < oldheads:
1831 return newheads - oldheads - 1
1832 return newheads - oldheads - 1
1832 else:
1833 else:
1833 return newheads - oldheads + 1
1834 return newheads - oldheads + 1
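The arithmetic above implements the "never return 0 here" convention: the head delta is shifted by one so that a pull that leaves the head count unchanged still reports a non-zero value. A small stand-alone illustration in plain Python (not repository code; the numbers are invented):

    def headsdelta(oldheads, newheads):
        # mirrors the two return statements above
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    for old, new in [(1, 1), (1, 3), (3, 1)]:
        print old, new, '->', headsdelta(old, new)
    # 1 1 -> 1    (head count unchanged, still non-zero)
    # 1 3 -> 3    (two new heads)
    # 3 1 -> -3   (two heads went away)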
1834
1835
1835
1836
1836 def stream_in(self, remote, requirements):
1837 def stream_in(self, remote, requirements):
1837 fp = remote.stream_out()
1838 fp = remote.stream_out()
1838 l = fp.readline()
1839 l = fp.readline()
1839 try:
1840 try:
1840 resp = int(l)
1841 resp = int(l)
1841 except ValueError:
1842 except ValueError:
1842 raise error.ResponseError(
1843 raise error.ResponseError(
1843 _('Unexpected response from remote server:'), l)
1844 _('Unexpected response from remote server:'), l)
1844 if resp == 1:
1845 if resp == 1:
1845 raise util.Abort(_('operation forbidden by server'))
1846 raise util.Abort(_('operation forbidden by server'))
1846 elif resp == 2:
1847 elif resp == 2:
1847 raise util.Abort(_('locking the remote repository failed'))
1848 raise util.Abort(_('locking the remote repository failed'))
1848 elif resp != 0:
1849 elif resp != 0:
1849 raise util.Abort(_('the server sent an unknown error code'))
1850 raise util.Abort(_('the server sent an unknown error code'))
1850 self.ui.status(_('streaming all changes\n'))
1851 self.ui.status(_('streaming all changes\n'))
1851 l = fp.readline()
1852 l = fp.readline()
1852 try:
1853 try:
1853 total_files, total_bytes = map(int, l.split(' ', 1))
1854 total_files, total_bytes = map(int, l.split(' ', 1))
1854 except (ValueError, TypeError):
1855 except (ValueError, TypeError):
1855 raise error.ResponseError(
1856 raise error.ResponseError(
1856 _('Unexpected response from remote server:'), l)
1857 _('Unexpected response from remote server:'), l)
1857 self.ui.status(_('%d files to transfer, %s of data\n') %
1858 self.ui.status(_('%d files to transfer, %s of data\n') %
1858 (total_files, util.bytecount(total_bytes)))
1859 (total_files, util.bytecount(total_bytes)))
1859 start = time.time()
1860 start = time.time()
1860 for i in xrange(total_files):
1861 for i in xrange(total_files):
1861 # XXX doesn't support '\n' or '\r' in filenames
1862 # XXX doesn't support '\n' or '\r' in filenames
1862 l = fp.readline()
1863 l = fp.readline()
1863 try:
1864 try:
1864 name, size = l.split('\0', 1)
1865 name, size = l.split('\0', 1)
1865 size = int(size)
1866 size = int(size)
1866 except (ValueError, TypeError):
1867 except (ValueError, TypeError):
1867 raise error.ResponseError(
1868 raise error.ResponseError(
1868 _('Unexpected response from remote server:'), l)
1869 _('Unexpected response from remote server:'), l)
1869 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1870 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1870 # for backwards compat, name was partially encoded
1871 # for backwards compat, name was partially encoded
1871 ofp = self.sopener(store.decodedir(name), 'w')
1872 ofp = self.sopener(store.decodedir(name), 'w')
1872 for chunk in util.filechunkiter(fp, limit=size):
1873 for chunk in util.filechunkiter(fp, limit=size):
1873 ofp.write(chunk)
1874 ofp.write(chunk)
1874 ofp.close()
1875 ofp.close()
1875 elapsed = time.time() - start
1876 elapsed = time.time() - start
1876 if elapsed <= 0:
1877 if elapsed <= 0:
1877 elapsed = 0.001
1878 elapsed = 0.001
1878 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1879 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1879 (util.bytecount(total_bytes), elapsed,
1880 (util.bytecount(total_bytes), elapsed,
1880 util.bytecount(total_bytes / elapsed)))
1881 util.bytecount(total_bytes / elapsed)))
1881
1882
1882 # new requirements = old non-format requirements + new format-related
1883 # new requirements = old non-format requirements + new format-related
1883 # requirements from the streamed-in repository
1884 # requirements from the streamed-in repository
1884 requirements.update(set(self.requirements) - self.supportedformats)
1885 requirements.update(set(self.requirements) - self.supportedformats)
1885 self._applyrequirements(requirements)
1886 self._applyrequirements(requirements)
1886 self._writerequirements()
1887 self._writerequirements()
1887
1888
1888 self.invalidate()
1889 self.invalidate()
1889 return len(self.heads()) + 1
1890 return len(self.heads()) + 1
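To make the requirement bookkeeping at the end of stream_in concrete: the repository keeps its old non-format requirements and adopts the format requirements of the streamed-in store. A stand-alone sketch of that set arithmetic, with invented requirement sets (the names are illustrative only):

    supportedformats = set(['revlogv1', 'parentdelta'])       # format requirements
    oldrequirements = set(['revlogv1', 'store', 'fncache'])   # what this repo had
    streamed = set(['revlogv1'])                               # formats the server sent

    # new requirements = old non-format requirements + streamed format requirements
    newrequirements = set(streamed)
    newrequirements.update(oldrequirements - supportedformats)
    print sorted(newrequirements)    # ['fncache', 'revlogv1', 'store']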
1890
1891
1891 def clone(self, remote, heads=[], stream=False):
1892 def clone(self, remote, heads=[], stream=False):
1892 '''clone remote repository.
1893 '''clone remote repository.
1893
1894
1894 keyword arguments:
1895 keyword arguments:
1895 heads: list of revs to clone (forces use of pull)
1896 heads: list of revs to clone (forces use of pull)
1896 stream: use streaming clone if possible'''
1897 stream: use streaming clone if possible'''
1897
1898
1898 # now, all clients that can request uncompressed clones can
1899 # now, all clients that can request uncompressed clones can
1899 # read repo formats supported by all servers that can serve
1900 # read repo formats supported by all servers that can serve
1900 # them.
1901 # them.
1901
1902
1902 # if revlog format changes, client will have to check version
1903 # if revlog format changes, client will have to check version
1903 # and format flags on "stream" capability, and use
1904 # and format flags on "stream" capability, and use
1904 # uncompressed only if compatible.
1905 # uncompressed only if compatible.
1905
1906
1906 if stream and not heads:
1907 if stream and not heads:
1907 # 'stream' means remote revlog format is revlogv1 only
1908 # 'stream' means remote revlog format is revlogv1 only
1908 if remote.capable('stream'):
1909 if remote.capable('stream'):
1909 return self.stream_in(remote, set(('revlogv1',)))
1910 return self.stream_in(remote, set(('revlogv1',)))
1910 # otherwise, 'streamreqs' contains the remote revlog format
1911 # otherwise, 'streamreqs' contains the remote revlog format
1911 streamreqs = remote.capable('streamreqs')
1912 streamreqs = remote.capable('streamreqs')
1912 if streamreqs:
1913 if streamreqs:
1913 streamreqs = set(streamreqs.split(','))
1914 streamreqs = set(streamreqs.split(','))
1914 # if we support it, stream in and adjust our requirements
1915 # if we support it, stream in and adjust our requirements
1915 if not streamreqs - self.supportedformats:
1916 if not streamreqs - self.supportedformats:
1916 return self.stream_in(remote, streamreqs)
1917 return self.stream_in(remote, streamreqs)
1917 return self.pull(remote, heads)
1918 return self.pull(remote, heads)
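The streamreqs branch in clone() reduces to one subset test: stream only when every format requirement advertised by the remote is one this client supports. A self-contained sketch (the capability string and the sets are invented for illustration):

    streamreqs = 'revlogv1,parentdelta'                 # hypothetical capability value
    supportedformats = set(['revlogv1', 'parentdelta'])

    requested = set(streamreqs.split(','))
    if not requested - supportedformats:
        print 'stream in: all remote formats are supported'
    else:
        print 'fall back to pull, unsupported:', sorted(requested - supportedformats)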
1918
1919
1919 def pushkey(self, namespace, key, old, new):
1920 def pushkey(self, namespace, key, old, new):
1920 return pushkey.push(self, namespace, key, old, new)
1921 return pushkey.push(self, namespace, key, old, new)
1921
1922
1922 def listkeys(self, namespace):
1923 def listkeys(self, namespace):
1923 return pushkey.list(self, namespace)
1924 return pushkey.list(self, namespace)
1924
1925
1925 # used to avoid circular references so destructors work
1926 # used to avoid circular references so destructors work
1926 def aftertrans(files):
1927 def aftertrans(files):
1927 renamefiles = [tuple(t) for t in files]
1928 renamefiles = [tuple(t) for t in files]
1928 def a():
1929 def a():
1929 for src, dest in renamefiles:
1930 for src, dest in renamefiles:
1930 util.rename(src, dest)
1931 util.rename(src, dest)
1931 return a
1932 return a
1932
1933
1933 def instance(ui, path, create):
1934 def instance(ui, path, create):
1934 return localrepository(ui, util.drop_scheme('file', path), create)
1935 return localrepository(ui, util.drop_scheme('file', path), create)
1935
1936
1936 def islocal(path):
1937 def islocal(path):
1937 return True
1938 return True
@@ -1,285 +1,287 b''
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13 from node import nullid, bin, hex, short
13 from node import nullid, bin, hex, short
14 from i18n import _
14 from i18n import _
15 import os.path
15 import encoding
16 import encoding
16 import error
17 import error
17
18
18 def findglobaltags(ui, repo, alltags, tagtypes):
19 def findglobaltags(ui, repo, alltags, tagtypes):
19 '''Find global tags in repo by reading .hgtags from every head that
20 '''Find global tags in repo by reading .hgtags from every head that
20 has a distinct version of it, using a cache to avoid excess work.
21 has a distinct version of it, using a cache to avoid excess work.
21 Updates the dicts alltags, tagtypes in place: alltags maps tag name
22 Updates the dicts alltags, tagtypes in place: alltags maps tag name
22 to (node, hist) pair (see _readtags() below), and tagtypes maps tag
23 to (node, hist) pair (see _readtags() below), and tagtypes maps tag
23 name to tag type ("global" in this case).'''
24 name to tag type ("global" in this case).'''
24 # This is so we can be lazy and assume alltags contains only global
25 # This is so we can be lazy and assume alltags contains only global
25 # tags when we pass it to _writetagcache().
26 # tags when we pass it to _writetagcache().
26 assert len(alltags) == len(tagtypes) == 0, \
27 assert len(alltags) == len(tagtypes) == 0, \
27 "findglobaltags() should be called first"
28 "findglobaltags() should be called first"
28
29
29 (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
30 (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
30 if cachetags is not None:
31 if cachetags is not None:
31 assert not shouldwrite
32 assert not shouldwrite
32 # XXX is this really 100% correct? are there oddball special
33 # XXX is this really 100% correct? are there oddball special
33 # cases where a global tag should outrank a local tag but won't,
34 # cases where a global tag should outrank a local tag but won't,
34 # because cachetags does not contain rank info?
35 # because cachetags does not contain rank info?
35 _updatetags(cachetags, 'global', alltags, tagtypes)
36 _updatetags(cachetags, 'global', alltags, tagtypes)
36 return
37 return
37
38
38 seen = set() # set of fnode
39 seen = set() # set of fnode
39 fctx = None
40 fctx = None
40 for head in reversed(heads): # oldest to newest
41 for head in reversed(heads): # oldest to newest
41 assert head in repo.changelog.nodemap, \
42 assert head in repo.changelog.nodemap, \
42 "tag cache returned bogus head %s" % short(head)
43 "tag cache returned bogus head %s" % short(head)
43
44
44 fnode = tagfnode.get(head)
45 fnode = tagfnode.get(head)
45 if fnode and fnode not in seen:
46 if fnode and fnode not in seen:
46 seen.add(fnode)
47 seen.add(fnode)
47 if not fctx:
48 if not fctx:
48 fctx = repo.filectx('.hgtags', fileid=fnode)
49 fctx = repo.filectx('.hgtags', fileid=fnode)
49 else:
50 else:
50 fctx = fctx.filectx(fnode)
51 fctx = fctx.filectx(fnode)
51
52
52 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
53 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
53 _updatetags(filetags, 'global', alltags, tagtypes)
54 _updatetags(filetags, 'global', alltags, tagtypes)
54
55
55 # and update the cache (if necessary)
56 # and update the cache (if necessary)
56 if shouldwrite:
57 if shouldwrite:
57 _writetagcache(ui, repo, heads, tagfnode, alltags)
58 _writetagcache(ui, repo, heads, tagfnode, alltags)
58
59
59 def readlocaltags(ui, repo, alltags, tagtypes):
60 def readlocaltags(ui, repo, alltags, tagtypes):
60 '''Read local tags in repo. Update alltags and tagtypes.'''
61 '''Read local tags in repo. Update alltags and tagtypes.'''
61 try:
62 try:
62 # localtags is in the local encoding; re-encode to UTF-8 on
63 # localtags is in the local encoding; re-encode to UTF-8 on
63 # input for consistency with the rest of this module.
64 # input for consistency with the rest of this module.
64 data = repo.opener("localtags").read()
65 data = repo.opener("localtags").read()
65 filetags = _readtags(
66 filetags = _readtags(
66 ui, repo, data.splitlines(), "localtags",
67 ui, repo, data.splitlines(), "localtags",
67 recode=encoding.fromlocal)
68 recode=encoding.fromlocal)
68 _updatetags(filetags, "local", alltags, tagtypes)
69 _updatetags(filetags, "local", alltags, tagtypes)
69 except IOError:
70 except IOError:
70 pass
71 pass
71
72
72 def _readtags(ui, repo, lines, fn, recode=None):
73 def _readtags(ui, repo, lines, fn, recode=None):
73 '''Read tag definitions from a file (or any source of lines).
74 '''Read tag definitions from a file (or any source of lines).
74 Return a mapping from tag name to (node, hist): node is the node id
75 Return a mapping from tag name to (node, hist): node is the node id
75 from the last line read for that name, and hist is the list of node
76 from the last line read for that name, and hist is the list of node
76 ids previously associated with it (in file order). All node ids are
77 ids previously associated with it (in file order). All node ids are
77 binary, not hex.'''
78 binary, not hex.'''
78
79
79 filetags = {} # map tag name to (node, hist)
80 filetags = {} # map tag name to (node, hist)
80 count = 0
81 count = 0
81
82
82 def warn(msg):
83 def warn(msg):
83 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
84 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
84
85
85 for line in lines:
86 for line in lines:
86 count += 1
87 count += 1
87 if not line:
88 if not line:
88 continue
89 continue
89 try:
90 try:
90 (nodehex, name) = line.split(" ", 1)
91 (nodehex, name) = line.split(" ", 1)
91 except ValueError:
92 except ValueError:
92 warn(_("cannot parse entry"))
93 warn(_("cannot parse entry"))
93 continue
94 continue
94 name = name.strip()
95 name = name.strip()
95 if recode:
96 if recode:
96 name = recode(name)
97 name = recode(name)
97 try:
98 try:
98 nodebin = bin(nodehex)
99 nodebin = bin(nodehex)
99 except TypeError:
100 except TypeError:
100 warn(_("node '%s' is not well formed") % nodehex)
101 warn(_("node '%s' is not well formed") % nodehex)
101 continue
102 continue
102
103
103 # update filetags
104 # update filetags
104 hist = []
105 hist = []
105 if name in filetags:
106 if name in filetags:
106 n, hist = filetags[name]
107 n, hist = filetags[name]
107 hist.append(n)
108 hist.append(n)
108 filetags[name] = (nodebin, hist)
109 filetags[name] = (nodebin, hist)
109 return filetags
110 return filetags
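A short trace helps with the (node, hist) convention documented in _readtags(): when several lines define the same tag, the last node read wins and earlier nodes are pushed onto its history. The snippet below re-implements only that bookkeeping for two made-up entries; it uses hex strings for readability, whereas the real function stores binary node ids:

    entries = [
        ('1111111111111111111111111111111111111111', 'release-1.0'),
        ('2222222222222222222222222222222222222222', 'release-1.0'),
    ]

    filetags = {}                    # tag name -> (node, hist), as in _readtags()
    for node, name in entries:
        hist = []
        if name in filetags:
            n, hist = filetags[name]
            hist.append(n)
        filetags[name] = (node, hist)

    print filetags['release-1.0']
    # ('2222...', ['1111...'])  -- last definition wins, older node kept in hist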
110
111
111 def _updatetags(filetags, tagtype, alltags, tagtypes):
112 def _updatetags(filetags, tagtype, alltags, tagtypes):
112 '''Incorporate the tag info read from one file into the two
113 '''Incorporate the tag info read from one file into the two
113 dictionaries, alltags and tagtypes, that contain all tag
114 dictionaries, alltags and tagtypes, that contain all tag
114 info (global across all heads plus local).'''
115 info (global across all heads plus local).'''
115
116
116 for name, nodehist in filetags.iteritems():
117 for name, nodehist in filetags.iteritems():
117 if name not in alltags:
118 if name not in alltags:
118 alltags[name] = nodehist
119 alltags[name] = nodehist
119 tagtypes[name] = tagtype
120 tagtypes[name] = tagtype
120 continue
121 continue
121
122
122 # we prefer alltags[name] if:
123 # we prefer alltags[name] if:
123 # it supersedes us OR
124 # it supersedes us OR
124 # mutual supersedes and it has a higher rank
125 # mutual supersedes and it has a higher rank
125 # otherwise we win because we're tip-most
126 # otherwise we win because we're tip-most
126 anode, ahist = nodehist
127 anode, ahist = nodehist
127 bnode, bhist = alltags[name]
128 bnode, bhist = alltags[name]
128 if (bnode != anode and anode in bhist and
129 if (bnode != anode and anode in bhist and
129 (bnode not in ahist or len(bhist) > len(ahist))):
130 (bnode not in ahist or len(bhist) > len(ahist))):
130 anode = bnode
131 anode = bnode
131 ahist.extend([n for n in bhist if n not in ahist])
132 ahist.extend([n for n in bhist if n not in ahist])
132 alltags[name] = anode, ahist
133 alltags[name] = anode, ahist
133 tagtypes[name] = tagtype
134 tagtypes[name] = tagtype
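The supersedes rule is easiest to see on a single tag. The stand-alone trace below mirrors the decision in _updatetags() with two placeholder node ids (the real code compares binary nodes): the existing global entry already records the candidate node in its history, so the existing node is kept.

    n1, n2 = 'node-1', 'node-2'          # placeholder ids
    alltags = {'foo': (n2, [n1])}        # existing entry: n2 supersedes n1

    anode, ahist = n1, []                # candidate read from an older head
    bnode, bhist = alltags['foo']

    if (bnode != anode and anode in bhist and
        (bnode not in ahist or len(bhist) > len(ahist))):
        anode = bnode                    # existing node supersedes the candidate
    ahist.extend([n for n in bhist if n not in ahist])
    alltags['foo'] = (anode, ahist)

    print alltags['foo']                 # ('node-2', ['node-1'])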
134
135
135
136
136 # The tag cache only stores info about heads, not the tag contents
137 # The tag cache only stores info about heads, not the tag contents
137 # from each head. I.e. it doesn't try to squeeze out the maximum
138 # from each head. I.e. it doesn't try to squeeze out the maximum
138 # performance, but is simpler and has a better chance of actually
139 # performance, but is simpler and has a better chance of actually
139 # working correctly. And this gives the biggest performance win: it
140 # working correctly. And this gives the biggest performance win: it
140 # avoids looking up .hgtags in the manifest for every head, and it
141 # avoids looking up .hgtags in the manifest for every head, and it
141 # can avoid calling heads() at all if there have been no changes to
142 # can avoid calling heads() at all if there have been no changes to
142 # the repo.
143 # the repo.
143
144
144 def _readtagcache(ui, repo):
145 def _readtagcache(ui, repo):
145 '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
146 '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
146 shouldwrite). If the cache is completely up-to-date, cachetags is a
147 shouldwrite). If the cache is completely up-to-date, cachetags is a
147 dict of the form returned by _readtags(); otherwise, it is None and
148 dict of the form returned by _readtags(); otherwise, it is None and
148 heads and fnodes are set. In that case, heads is the list of all
149 heads and fnodes are set. In that case, heads is the list of all
149 heads currently in the repository (ordered from tip to oldest) and
150 heads currently in the repository (ordered from tip to oldest) and
150 fnodes is a mapping from head to .hgtags filenode. If those two are
151 fnodes is a mapping from head to .hgtags filenode. If those two are
151 set, caller is responsible for reading tag info from each head.'''
152 set, caller is responsible for reading tag info from each head.'''
152
153
153 try:
154 try:
154 cachefile = repo.opener('tags.cache', 'r')
155 cachefile = repo.opener(os.path.join('cache', 'tags'), 'r')
155 # force reading the file for static-http
156 # force reading the file for static-http
156 cachelines = iter(cachefile)
157 cachelines = iter(cachefile)
157 except IOError:
158 except IOError:
158 cachefile = None
159 cachefile = None
159
160
160 # The cache file consists of lines like
161 # The cache file consists of lines like
161 # <headrev> <headnode> [<tagnode>]
162 # <headrev> <headnode> [<tagnode>]
162 # where <headrev> and <headnode> redundantly identify a repository
163 # where <headrev> and <headnode> redundantly identify a repository
163 # head from the time the cache was written, and <tagnode> is the
164 # head from the time the cache was written, and <tagnode> is the
164 # filenode of .hgtags on that head. Heads with no .hgtags file will
165 # filenode of .hgtags on that head. Heads with no .hgtags file will
165 # have no <tagnode>. The cache is ordered from tip to oldest (which
166 # have no <tagnode>. The cache is ordered from tip to oldest (which
166 # is part of why <headrev> is there: a quick visual check is all
167 # is part of why <headrev> is there: a quick visual check is all
167 # that's required to ensure correct order).
168 # that's required to ensure correct order).
168 #
169 #
169 # This information is enough to let us avoid the most expensive part
170 # This information is enough to let us avoid the most expensive part
170 # of finding global tags, which is looking up <tagnode> in the
171 # of finding global tags, which is looking up <tagnode> in the
171 # manifest for each head.
172 # manifest for each head.
172 cacherevs = [] # list of headrev
173 cacherevs = [] # list of headrev
173 cacheheads = [] # list of headnode
174 cacheheads = [] # list of headnode
174 cachefnode = {} # map headnode to filenode
175 cachefnode = {} # map headnode to filenode
175 if cachefile:
176 if cachefile:
176 try:
177 try:
177 for line in cachelines:
178 for line in cachelines:
178 if line == "\n":
179 if line == "\n":
179 break
180 break
180 line = line.rstrip().split()
181 line = line.rstrip().split()
181 cacherevs.append(int(line[0]))
182 cacherevs.append(int(line[0]))
182 headnode = bin(line[1])
183 headnode = bin(line[1])
183 cacheheads.append(headnode)
184 cacheheads.append(headnode)
184 if len(line) == 3:
185 if len(line) == 3:
185 fnode = bin(line[2])
186 fnode = bin(line[2])
186 cachefnode[headnode] = fnode
187 cachefnode[headnode] = fnode
187 except (ValueError, TypeError):
188 except (ValueError, TypeError):
188 # corruption of tags.cache, just recompute it
189 # corruption of the tags cache, just recompute it
189 ui.warn(_('.hg/tags.cache is corrupt, rebuilding it\n'))
190 ui.warn(_('.hg/cache/tags is corrupt, rebuilding it\n'))
190 cacheheads = []
191 cacheheads = []
191 cacherevs = []
192 cacherevs = []
192 cachefnode = {}
193 cachefnode = {}
193
194
194 tipnode = repo.changelog.tip()
195 tipnode = repo.changelog.tip()
195 tiprev = len(repo.changelog) - 1
196 tiprev = len(repo.changelog) - 1
196
197
197 # Case 1 (common): tip is the same, so nothing has changed.
198 # Case 1 (common): tip is the same, so nothing has changed.
198 # (Unchanged tip trivially means no changesets have been added.
199 # (Unchanged tip trivially means no changesets have been added.
199 # But, thanks to localrepository.destroyed(), it also means none
200 # But, thanks to localrepository.destroyed(), it also means none
200 # have been destroyed by strip or rollback.)
201 # have been destroyed by strip or rollback.)
201 if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
202 if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
202 tags = _readtags(ui, repo, cachelines, cachefile.name)
203 tags = _readtags(ui, repo, cachelines, cachefile.name)
203 cachefile.close()
204 cachefile.close()
204 return (None, None, tags, False)
205 return (None, None, tags, False)
205 if cachefile:
206 if cachefile:
206 cachefile.close() # ignore rest of file
207 cachefile.close() # ignore rest of file
207
208
208 repoheads = repo.heads()
209 repoheads = repo.heads()
209 # Case 2 (uncommon): empty repo; get out quickly and don't bother
210 # Case 2 (uncommon): empty repo; get out quickly and don't bother
210 # writing an empty cache.
211 # writing an empty cache.
211 if repoheads == [nullid]:
212 if repoheads == [nullid]:
212 return ([], {}, {}, False)
213 return ([], {}, {}, False)
213
214
214 # Case 3 (uncommon): cache file missing or empty.
215 # Case 3 (uncommon): cache file missing or empty.
215
216
216 # Case 4 (uncommon): tip rev decreased. This should only happen
217 # Case 4 (uncommon): tip rev decreased. This should only happen
217 # when we're called from localrepository.destroyed(). Refresh the
218 # when we're called from localrepository.destroyed(). Refresh the
218 # cache so future invocations will not see disappeared heads in the
219 # cache so future invocations will not see disappeared heads in the
219 # cache.
220 # cache.
220
221
221 # Case 5 (common): tip has changed, so we've added/replaced heads.
222 # Case 5 (common): tip has changed, so we've added/replaced heads.
222
223
223 # As it happens, the code to handle cases 3, 4, 5 is the same.
224 # As it happens, the code to handle cases 3, 4, 5 is the same.
224
225
225 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
226 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
226 # exposed".
227 # exposed".
227 newheads = [head
228 newheads = [head
228 for head in repoheads
229 for head in repoheads
229 if head not in set(cacheheads)]
230 if head not in set(cacheheads)]
230
231
231 # Now we have to look up the .hgtags filenode for every new head.
232 # Now we have to look up the .hgtags filenode for every new head.
232 # This is the most expensive part of finding tags, so performance
233 # This is the most expensive part of finding tags, so performance
233 # depends primarily on the size of newheads. Worst case: no cache
234 # depends primarily on the size of newheads. Worst case: no cache
234 # file, so newheads == repoheads.
235 # file, so newheads == repoheads.
235 for head in newheads:
236 for head in newheads:
236 cctx = repo[head]
237 cctx = repo[head]
237 try:
238 try:
238 fnode = cctx.filenode('.hgtags')
239 fnode = cctx.filenode('.hgtags')
239 cachefnode[head] = fnode
240 cachefnode[head] = fnode
240 except error.LookupError:
241 except error.LookupError:
241 # no .hgtags file on this head
242 # no .hgtags file on this head
242 pass
243 pass
243
244
244 # Caller has to iterate over all heads, but can use the filenodes in
245 # Caller has to iterate over all heads, but can use the filenodes in
245 # cachefnode to get to each .hgtags revision quickly.
246 # cachefnode to get to each .hgtags revision quickly.
246 return (repoheads, cachefnode, None, True)
247 return (repoheads, cachefnode, None, True)
247
248
248 def _writetagcache(ui, repo, heads, tagfnode, cachetags):
249 def _writetagcache(ui, repo, heads, tagfnode, cachetags):
249
250
250 try:
251 try:
251 cachefile = repo.opener('tags.cache', 'w', atomictemp=True)
252 cachefile = repo.opener(os.path.join('cache', 'tags'), 'w',
253 atomictemp=True)
252 except (OSError, IOError):
254 except (OSError, IOError):
253 return
255 return
254
256
255 realheads = repo.heads() # for sanity checks below
257 realheads = repo.heads() # for sanity checks below
256 for head in heads:
258 for head in heads:
257 # temporary sanity checks; these can probably be removed
259 # temporary sanity checks; these can probably be removed
258 # once this code has been in crew for a few weeks
260 # once this code has been in crew for a few weeks
259 assert head in repo.changelog.nodemap, \
261 assert head in repo.changelog.nodemap, \
260 'trying to write non-existent node %s to tag cache' % short(head)
262 'trying to write non-existent node %s to tag cache' % short(head)
261 assert head in realheads, \
263 assert head in realheads, \
262 'trying to write non-head %s to tag cache' % short(head)
264 'trying to write non-head %s to tag cache' % short(head)
263 assert head != nullid, \
265 assert head != nullid, \
264 'trying to write nullid to tag cache'
266 'trying to write nullid to tag cache'
265
267
266 # This can't fail because of the first assert above. When/if we
268 # This can't fail because of the first assert above. When/if we
267 # remove that assert, we might want to catch LookupError here
269 # remove that assert, we might want to catch LookupError here
268 # and downgrade it to a warning.
270 # and downgrade it to a warning.
269 rev = repo.changelog.rev(head)
271 rev = repo.changelog.rev(head)
270
272
271 fnode = tagfnode.get(head)
273 fnode = tagfnode.get(head)
272 if fnode:
274 if fnode:
273 cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
275 cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
274 else:
276 else:
275 cachefile.write('%d %s\n' % (rev, hex(head)))
277 cachefile.write('%d %s\n' % (rev, hex(head)))
276
278
277 # Tag names in the cache are in UTF-8 -- which is the whole reason
279 # Tag names in the cache are in UTF-8 -- which is the whole reason
278 # we keep them in UTF-8 throughout this module. If we converted
280 # we keep them in UTF-8 throughout this module. If we converted
279 # them to local encoding on input, we would lose info writing them to
281 # them to local encoding on input, we would lose info writing them to
280 # the cache.
282 # the cache.
281 cachefile.write('\n')
283 cachefile.write('\n')
282 for (name, (node, hist)) in cachetags.iteritems():
284 for (name, (node, hist)) in cachetags.iteritems():
283 cachefile.write("%s %s\n" % (hex(node), name))
285 cachefile.write("%s %s\n" % (hex(node), name))
284
286
285 cachefile.rename()
287 cachefile.rename()
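Read together, _readtagcache() and _writetagcache() agree on the layout of the cache file (.hg/cache/tags after this change): one line per head, tip first, of the form <headrev> <headnode> [<tagfilenode>], a blank separator, then one <node> <tagname> line per tag in UTF-8. An invented sample (revision numbers and hashes are placeholders, not real nodes):

    12 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
    11 cccccccccccccccccccccccccccccccccccccccc

    dddddddddddddddddddddddddddddddddddddddd release-1.0

The head at rev 11 carries no .hgtags filenode, so its line has only two fields; heads with a tagfilenode let the reader skip the manifest lookup entirely.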
@@ -1,331 +1,331 b''
1 $ cat > nlinks.py <<EOF
1 $ cat > nlinks.py <<EOF
2 > import os, sys
2 > import os, sys
3 > for f in sorted(sys.stdin.readlines()):
3 > for f in sorted(sys.stdin.readlines()):
4 > f = f[:-1]
4 > f = f[:-1]
5 > print os.lstat(f).st_nlink, f
5 > print os.lstat(f).st_nlink, f
6 > EOF
6 > EOF
7
7
8 $ nlinksdir()
8 $ nlinksdir()
9 > {
9 > {
10 > find $1 -type f | python $TESTTMP/nlinks.py
10 > find $1 -type f | python $TESTTMP/nlinks.py
11 > }
11 > }
12
12
13 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
13 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
14
14
15 $ cat > linkcp.py <<EOF
15 $ cat > linkcp.py <<EOF
16 > from mercurial import util
16 > from mercurial import util
17 > import sys
17 > import sys
18 > util.copyfiles(sys.argv[1], sys.argv[2], hardlink=True)
18 > util.copyfiles(sys.argv[1], sys.argv[2], hardlink=True)
19 > EOF
19 > EOF
20
20
21 $ linkcp()
21 $ linkcp()
22 > {
22 > {
23 > python $TESTTMP/linkcp.py $1 $2
23 > python $TESTTMP/linkcp.py $1 $2
24 > }
24 > }
25
25
26 Prepare repo r1:
26 Prepare repo r1:
27
27
28 $ mkdir r1
28 $ mkdir r1
29 $ cd r1
29 $ cd r1
30 $ hg init
30 $ hg init
31
31
32 $ echo c1 > f1
32 $ echo c1 > f1
33 $ hg add f1
33 $ hg add f1
34 $ hg ci -m0
34 $ hg ci -m0
35
35
36 $ mkdir d1
36 $ mkdir d1
37 $ cd d1
37 $ cd d1
38 $ echo c2 > f2
38 $ echo c2 > f2
39 $ hg add f2
39 $ hg add f2
40 $ hg ci -m1
40 $ hg ci -m1
41 $ cd ../..
41 $ cd ../..
42
42
43 $ nlinksdir r1/.hg/store
43 $ nlinksdir r1/.hg/store
44 1 r1/.hg/store/00changelog.i
44 1 r1/.hg/store/00changelog.i
45 1 r1/.hg/store/00manifest.i
45 1 r1/.hg/store/00manifest.i
46 1 r1/.hg/store/data/d1/f2.i
46 1 r1/.hg/store/data/d1/f2.i
47 1 r1/.hg/store/data/f1.i
47 1 r1/.hg/store/data/f1.i
48 1 r1/.hg/store/fncache
48 1 r1/.hg/store/fncache
49 1 r1/.hg/store/undo
49 1 r1/.hg/store/undo
50
50
51
51
52 Create hardlinked clone r2:
52 Create hardlinked clone r2:
53
53
54 $ hg clone -U --debug r1 r2
54 $ hg clone -U --debug r1 r2
55 linked 7 files
55 linked 7 files
56
56
57 Create non-hardlinked clone r3:
57 Create non-hardlinked clone r3:
58
58
59 $ hg clone --pull r1 r3
59 $ hg clone --pull r1 r3
60 requesting all changes
60 requesting all changes
61 adding changesets
61 adding changesets
62 adding manifests
62 adding manifests
63 adding file changes
63 adding file changes
64 added 2 changesets with 2 changes to 2 files
64 added 2 changesets with 2 changes to 2 files
65 updating to branch default
65 updating to branch default
66 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
67
67
68
68
69 Repos r1 and r2 should now contain hardlinked files:
69 Repos r1 and r2 should now contain hardlinked files:
70
70
71 $ nlinksdir r1/.hg/store
71 $ nlinksdir r1/.hg/store
72 2 r1/.hg/store/00changelog.i
72 2 r1/.hg/store/00changelog.i
73 2 r1/.hg/store/00manifest.i
73 2 r1/.hg/store/00manifest.i
74 2 r1/.hg/store/data/d1/f2.i
74 2 r1/.hg/store/data/d1/f2.i
75 2 r1/.hg/store/data/f1.i
75 2 r1/.hg/store/data/f1.i
76 2 r1/.hg/store/fncache
76 2 r1/.hg/store/fncache
77 1 r1/.hg/store/undo
77 1 r1/.hg/store/undo
78
78
79 $ nlinksdir r2/.hg/store
79 $ nlinksdir r2/.hg/store
80 2 r2/.hg/store/00changelog.i
80 2 r2/.hg/store/00changelog.i
81 2 r2/.hg/store/00manifest.i
81 2 r2/.hg/store/00manifest.i
82 2 r2/.hg/store/data/d1/f2.i
82 2 r2/.hg/store/data/d1/f2.i
83 2 r2/.hg/store/data/f1.i
83 2 r2/.hg/store/data/f1.i
84 2 r2/.hg/store/fncache
84 2 r2/.hg/store/fncache
85
85
86 Repo r3 should not be hardlinked:
86 Repo r3 should not be hardlinked:
87
87
88 $ nlinksdir r3/.hg/store
88 $ nlinksdir r3/.hg/store
89 1 r3/.hg/store/00changelog.i
89 1 r3/.hg/store/00changelog.i
90 1 r3/.hg/store/00manifest.i
90 1 r3/.hg/store/00manifest.i
91 1 r3/.hg/store/data/d1/f2.i
91 1 r3/.hg/store/data/d1/f2.i
92 1 r3/.hg/store/data/f1.i
92 1 r3/.hg/store/data/f1.i
93 1 r3/.hg/store/fncache
93 1 r3/.hg/store/fncache
94 1 r3/.hg/store/undo
94 1 r3/.hg/store/undo
95
95
96
96
97 Create a non-inlined filelog in r3:
97 Create a non-inlined filelog in r3:
98
98
99 $ cd r3/d1
99 $ cd r3/d1
100 $ python -c 'for x in range(10000): print x' >> data1
100 $ python -c 'for x in range(10000): print x' >> data1
101 $ for j in 0 1 2 3 4 5 6 7 8 9; do
101 $ for j in 0 1 2 3 4 5 6 7 8 9; do
102 > cat data1 >> f2
102 > cat data1 >> f2
103 > hg commit -m$j
103 > hg commit -m$j
104 > done
104 > done
105 $ cd ../..
105 $ cd ../..
106
106
107 $ nlinksdir r3/.hg/store
107 $ nlinksdir r3/.hg/store
108 1 r3/.hg/store/00changelog.i
108 1 r3/.hg/store/00changelog.i
109 1 r3/.hg/store/00manifest.i
109 1 r3/.hg/store/00manifest.i
110 1 r3/.hg/store/data/d1/f2.d
110 1 r3/.hg/store/data/d1/f2.d
111 1 r3/.hg/store/data/d1/f2.i
111 1 r3/.hg/store/data/d1/f2.i
112 1 r3/.hg/store/data/f1.i
112 1 r3/.hg/store/data/f1.i
113 1 r3/.hg/store/fncache
113 1 r3/.hg/store/fncache
114 1 r3/.hg/store/undo
114 1 r3/.hg/store/undo
115
115
116 Push to repo r1 should break up most hardlinks in r2:
116 Push to repo r1 should break up most hardlinks in r2:
117
117
118 $ hg -R r2 verify
118 $ hg -R r2 verify
119 checking changesets
119 checking changesets
120 checking manifests
120 checking manifests
121 crosschecking files in changesets and manifests
121 crosschecking files in changesets and manifests
122 checking files
122 checking files
123 2 files, 2 changesets, 2 total revisions
123 2 files, 2 changesets, 2 total revisions
124
124
125 $ cd r3
125 $ cd r3
126 $ hg push
126 $ hg push
127 pushing to $TESTTMP/r1
127 pushing to $TESTTMP/r1
128 searching for changes
128 searching for changes
129 adding changesets
129 adding changesets
130 adding manifests
130 adding manifests
131 adding file changes
131 adding file changes
132 added 10 changesets with 10 changes to 1 files
132 added 10 changesets with 10 changes to 1 files
133
133
134 $ cd ..
134 $ cd ..
135
135
136 $ nlinksdir r2/.hg/store
136 $ nlinksdir r2/.hg/store
137 1 r2/.hg/store/00changelog.i
137 1 r2/.hg/store/00changelog.i
138 1 r2/.hg/store/00manifest.i
138 1 r2/.hg/store/00manifest.i
139 1 r2/.hg/store/data/d1/f2.i
139 1 r2/.hg/store/data/d1/f2.i
140 2 r2/.hg/store/data/f1.i
140 2 r2/.hg/store/data/f1.i
141 1 r2/.hg/store/fncache
141 1 r2/.hg/store/fncache
142
142
143 $ hg -R r2 verify
143 $ hg -R r2 verify
144 checking changesets
144 checking changesets
145 checking manifests
145 checking manifests
146 crosschecking files in changesets and manifests
146 crosschecking files in changesets and manifests
147 checking files
147 checking files
148 2 files, 2 changesets, 2 total revisions
148 2 files, 2 changesets, 2 total revisions
149
149
150
150
151 $ cd r1
151 $ cd r1
152 $ hg up
152 $ hg up
153 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
153 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
154
154
155 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
155 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
156
156
157 $ echo c1c1 >> f1
157 $ echo c1c1 >> f1
158 $ hg ci -m00
158 $ hg ci -m00
159 $ cd ..
159 $ cd ..
160
160
161 $ nlinksdir r2/.hg/store
161 $ nlinksdir r2/.hg/store
162 1 r2/.hg/store/00changelog.i
162 1 r2/.hg/store/00changelog.i
163 1 r2/.hg/store/00manifest.i
163 1 r2/.hg/store/00manifest.i
164 1 r2/.hg/store/data/d1/f2.i
164 1 r2/.hg/store/data/d1/f2.i
165 1 r2/.hg/store/data/f1.i
165 1 r2/.hg/store/data/f1.i
166 1 r2/.hg/store/fncache
166 1 r2/.hg/store/fncache
167
167
168
168
169 $ cd r3
169 $ cd r3
170 $ hg tip --template '{rev}:{node|short}\n'
170 $ hg tip --template '{rev}:{node|short}\n'
171 11:a6451b6bc41f
171 11:a6451b6bc41f
172 $ echo bla > f1
172 $ echo bla > f1
173 $ hg ci -m1
173 $ hg ci -m1
174 $ cd ..
174 $ cd ..
175
175
176 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
176 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
177
177
178 $ linkcp r3 r4
178 $ linkcp r3 r4
179
179
180 r4 has hardlinks in the working dir (not just inside .hg):
180 r4 has hardlinks in the working dir (not just inside .hg):
181
181
182 $ nlinksdir r4
182 $ nlinksdir r4
183 2 r4/.hg/00changelog.i
183 2 r4/.hg/00changelog.i
184 2 r4/.hg/branch
184 2 r4/.hg/branch
185 2 r4/.hg/branchheads.cache
185 2 r4/.hg/cache/branchheads
186 2 r4/.hg/cache/tags
186 2 r4/.hg/dirstate
187 2 r4/.hg/dirstate
187 2 r4/.hg/hgrc
188 2 r4/.hg/hgrc
188 2 r4/.hg/last-message.txt
189 2 r4/.hg/last-message.txt
189 2 r4/.hg/requires
190 2 r4/.hg/requires
190 2 r4/.hg/store/00changelog.i
191 2 r4/.hg/store/00changelog.i
191 2 r4/.hg/store/00manifest.i
192 2 r4/.hg/store/00manifest.i
192 2 r4/.hg/store/data/d1/f2.d
193 2 r4/.hg/store/data/d1/f2.d
193 2 r4/.hg/store/data/d1/f2.i
194 2 r4/.hg/store/data/d1/f2.i
194 2 r4/.hg/store/data/f1.i
195 2 r4/.hg/store/data/f1.i
195 2 r4/.hg/store/fncache
196 2 r4/.hg/store/fncache
196 2 r4/.hg/store/undo
197 2 r4/.hg/store/undo
197 2 r4/.hg/tags.cache
198 2 r4/.hg/undo.branch
198 2 r4/.hg/undo.branch
199 2 r4/.hg/undo.desc
199 2 r4/.hg/undo.desc
200 2 r4/.hg/undo.dirstate
200 2 r4/.hg/undo.dirstate
201 2 r4/d1/data1
201 2 r4/d1/data1
202 2 r4/d1/f2
202 2 r4/d1/f2
203 2 r4/f1
203 2 r4/f1
204
204
205 Update back to revision 11 in r4 should break hardlink of file f1:
205 Update back to revision 11 in r4 should break hardlink of file f1:
206
206
207 $ hg -R r4 up 11
207 $ hg -R r4 up 11
208 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
208 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
209
209
210 $ nlinksdir r4
210 $ nlinksdir r4
211 2 r4/.hg/00changelog.i
211 2 r4/.hg/00changelog.i
212 1 r4/.hg/branch
212 1 r4/.hg/branch
213 2 r4/.hg/branchheads.cache
213 2 r4/.hg/cache/branchheads
214 2 r4/.hg/cache/tags
214 1 r4/.hg/dirstate
215 1 r4/.hg/dirstate
215 2 r4/.hg/hgrc
216 2 r4/.hg/hgrc
216 2 r4/.hg/last-message.txt
217 2 r4/.hg/last-message.txt
217 2 r4/.hg/requires
218 2 r4/.hg/requires
218 2 r4/.hg/store/00changelog.i
219 2 r4/.hg/store/00changelog.i
219 2 r4/.hg/store/00manifest.i
220 2 r4/.hg/store/00manifest.i
220 2 r4/.hg/store/data/d1/f2.d
221 2 r4/.hg/store/data/d1/f2.d
221 2 r4/.hg/store/data/d1/f2.i
222 2 r4/.hg/store/data/d1/f2.i
222 2 r4/.hg/store/data/f1.i
223 2 r4/.hg/store/data/f1.i
223 2 r4/.hg/store/fncache
224 2 r4/.hg/store/fncache
224 2 r4/.hg/store/undo
225 2 r4/.hg/store/undo
225 2 r4/.hg/tags.cache
226 2 r4/.hg/undo.branch
226 2 r4/.hg/undo.branch
227 2 r4/.hg/undo.desc
227 2 r4/.hg/undo.desc
228 2 r4/.hg/undo.dirstate
228 2 r4/.hg/undo.dirstate
229 2 r4/d1/data1
229 2 r4/d1/data1
230 2 r4/d1/f2
230 2 r4/d1/f2
231 1 r4/f1
231 1 r4/f1
232
232
233
233
234 Test hardlinking outside hg:
234 Test hardlinking outside hg:
235
235
236 $ mkdir x
236 $ mkdir x
237 $ echo foo > x/a
237 $ echo foo > x/a
238
238
239 $ linkcp x y
239 $ linkcp x y
240 $ echo bar >> y/a
240 $ echo bar >> y/a
241
241
242 No diff if hardlink:
242 No diff if hardlink:
243
243
244 $ diff x/a y/a
244 $ diff x/a y/a
245
245
246 Test mq hardlinking:
246 Test mq hardlinking:
247
247
248 $ echo "[extensions]" >> $HGRCPATH
248 $ echo "[extensions]" >> $HGRCPATH
249 $ echo "mq=" >> $HGRCPATH
249 $ echo "mq=" >> $HGRCPATH
250
250
251 $ hg init a
251 $ hg init a
252 $ cd a
252 $ cd a
253
253
254 $ hg qimport -n foo - << EOF
254 $ hg qimport -n foo - << EOF
255 > # HG changeset patch
255 > # HG changeset patch
256 > # Date 1 0
256 > # Date 1 0
257 > diff -r 2588a8b53d66 a
257 > diff -r 2588a8b53d66 a
258 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
258 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
259 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
259 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
260 > @@ -0,0 +1,1 @@
260 > @@ -0,0 +1,1 @@
261 > +a
261 > +a
262 > EOF
262 > EOF
263 adding foo to series file
263 adding foo to series file
264
264
265 $ hg qpush
265 $ hg qpush
266 applying foo
266 applying foo
267 now at: foo
267 now at: foo
268
268
269 $ cd ..
269 $ cd ..
270 $ linkcp a b
270 $ linkcp a b
271 $ cd b
271 $ cd b
272
272
273 $ hg qimport -n bar - << EOF
273 $ hg qimport -n bar - << EOF
274 > # HG changeset patch
274 > # HG changeset patch
275 > # Date 2 0
275 > # Date 2 0
276 > diff -r 2588a8b53d66 a
276 > diff -r 2588a8b53d66 a
277 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
277 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
278 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
278 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
279 > @@ -0,0 +1,1 @@
279 > @@ -0,0 +1,1 @@
280 > +b
280 > +b
281 > EOF
281 > EOF
282 adding bar to series file
282 adding bar to series file
283
283
284 $ hg qpush
284 $ hg qpush
285 applying bar
285 applying bar
286 now at: bar
286 now at: bar
287
287
288 $ cat .hg/patches/status
288 $ cat .hg/patches/status
289 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
289 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
290 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
290 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
291
291
292 $ cat .hg/patches/series
292 $ cat .hg/patches/series
293 foo
293 foo
294 bar
294 bar
295
295
296 $ cat ../a/.hg/patches/status
296 $ cat ../a/.hg/patches/status
297 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
297 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
298
298
299 $ cat ../a/.hg/patches/series
299 $ cat ../a/.hg/patches/series
300 foo
300 foo
301
301
302 Test tags hardlinking:
302 Test tags hardlinking:
303
303
304 $ hg qdel -r qbase:qtip
304 $ hg qdel -r qbase:qtip
305 patch foo finalized without changeset message
305 patch foo finalized without changeset message
306 patch bar finalized without changeset message
306 patch bar finalized without changeset message
307
307
308 $ hg tag -l lfoo
308 $ hg tag -l lfoo
309 $ hg tag foo
309 $ hg tag foo
310
310
311 $ cd ..
311 $ cd ..
312 $ linkcp b c
312 $ linkcp b c
313 $ cd c
313 $ cd c
314
314
315 $ hg tag -l -r 0 lbar
315 $ hg tag -l -r 0 lbar
316 $ hg tag -r 0 bar
316 $ hg tag -r 0 bar
317
317
318 $ cat .hgtags
318 $ cat .hgtags
319 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
319 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
320 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
320 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
321
321
322 $ cat .hg/localtags
322 $ cat .hg/localtags
323 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
323 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
324 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
324 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
325
325
326 $ cat ../b/.hgtags
326 $ cat ../b/.hgtags
327 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
327 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
328
328
329 $ cat ../b/.hg/localtags
329 $ cat ../b/.hg/localtags
330 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
330 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
331
331
@@ -1,140 +1,141 b''
1 test that new files created in .hg inherit the permissions from .hg/store
1 test that new files created in .hg inherit the permissions from .hg/store
2
2
3
3
4 $ "$TESTDIR/hghave" unix-permissions || exit 80
4 $ "$TESTDIR/hghave" unix-permissions || exit 80
5
5
6 $ mkdir dir
6 $ mkdir dir
7
7
8 just in case somebody has a strange $TMPDIR
8 just in case somebody has a strange $TMPDIR
9
9
10 $ chmod g-s dir
10 $ chmod g-s dir
11 $ cd dir
11 $ cd dir
12
12
13 $ cat >printmodes.py <<EOF
13 $ cat >printmodes.py <<EOF
14 > import os, sys
14 > import os, sys
15 >
15 >
16 > allnames = []
16 > allnames = []
17 > isdir = {}
17 > isdir = {}
18 > for root, dirs, files in os.walk(sys.argv[1]):
18 > for root, dirs, files in os.walk(sys.argv[1]):
19 > for d in dirs:
19 > for d in dirs:
20 > name = os.path.join(root, d)
20 > name = os.path.join(root, d)
21 > isdir[name] = 1
21 > isdir[name] = 1
22 > allnames.append(name)
22 > allnames.append(name)
23 > for f in files:
23 > for f in files:
24 > name = os.path.join(root, f)
24 > name = os.path.join(root, f)
25 > allnames.append(name)
25 > allnames.append(name)
26 > allnames.sort()
26 > allnames.sort()
27 > for name in allnames:
27 > for name in allnames:
28 > suffix = name in isdir and '/' or ''
28 > suffix = name in isdir and '/' or ''
29 > print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
29 > print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
30 > EOF
30 > EOF
31
31
32 $ cat >mode.py <<EOF
32 $ cat >mode.py <<EOF
33 > import sys
33 > import sys
34 > import os
34 > import os
35 > print '%05o' % os.lstat(sys.argv[1]).st_mode
35 > print '%05o' % os.lstat(sys.argv[1]).st_mode
36 > EOF
36 > EOF
37
37
38 $ umask 077
38 $ umask 077
39
39
40 $ hg init repo
40 $ hg init repo
41 $ cd repo
41 $ cd repo
42
42
43 $ chmod 0770 .hg/store
43 $ chmod 0770 .hg/store
44
44
45 before commit
45 before commit
46 store can be written by the group, other files cannot
46 store can be written by the group, other files cannot
47 store is setgid
47 store is setgid
48
48
49 $ python ../printmodes.py .
49 $ python ../printmodes.py .
50 00700 ./.hg/
50 00700 ./.hg/
51 00600 ./.hg/00changelog.i
51 00600 ./.hg/00changelog.i
52 00600 ./.hg/requires
52 00600 ./.hg/requires
53 00770 ./.hg/store/
53 00770 ./.hg/store/
54
54
55 $ mkdir dir
55 $ mkdir dir
56 $ touch foo dir/bar
56 $ touch foo dir/bar
57 $ hg ci -qAm 'add files'
57 $ hg ci -qAm 'add files'
58
58
59 after commit
59 after commit
60 working dir files can only be written by the owner
60 working dir files can only be written by the owner
61 files created in .hg can be written by the group
61 files created in .hg can be written by the group
62 (in particular, store/**, dirstate, branch cache file, undo files)
62 (in particular, store/**, dirstate, branch cache file, undo files)
63 new directories are setgid
63 new directories are setgid
64
64
65 $ python ../printmodes.py .
65 $ python ../printmodes.py .
66 00700 ./.hg/
66 00700 ./.hg/
67 00600 ./.hg/00changelog.i
67 00600 ./.hg/00changelog.i
68 00660 ./.hg/dirstate
68 00660 ./.hg/dirstate
69 00660 ./.hg/last-message.txt
69 00660 ./.hg/last-message.txt
70 00600 ./.hg/requires
70 00600 ./.hg/requires
71 00770 ./.hg/store/
71 00770 ./.hg/store/
72 00660 ./.hg/store/00changelog.i
72 00660 ./.hg/store/00changelog.i
73 00660 ./.hg/store/00manifest.i
73 00660 ./.hg/store/00manifest.i
74 00770 ./.hg/store/data/
74 00770 ./.hg/store/data/
75 00770 ./.hg/store/data/dir/
75 00770 ./.hg/store/data/dir/
76 00660 ./.hg/store/data/dir/bar.i
76 00660 ./.hg/store/data/dir/bar.i
77 00660 ./.hg/store/data/foo.i
77 00660 ./.hg/store/data/foo.i
78 00660 ./.hg/store/fncache
78 00660 ./.hg/store/fncache
79 00660 ./.hg/store/undo
79 00660 ./.hg/store/undo
80 00660 ./.hg/undo.branch
80 00660 ./.hg/undo.branch
81 00660 ./.hg/undo.desc
81 00660 ./.hg/undo.desc
82 00660 ./.hg/undo.dirstate
82 00660 ./.hg/undo.dirstate
83 00700 ./dir/
83 00700 ./dir/
84 00600 ./dir/bar
84 00600 ./dir/bar
85 00600 ./foo
85 00600 ./foo
86
86
87 $ umask 007
87 $ umask 007
88 $ hg init ../push
88 $ hg init ../push
89
89
90 before push
90 before push
91 group can write everything
91 group can write everything
92
92
93 $ python ../printmodes.py ../push
93 $ python ../printmodes.py ../push
94 00770 ../push/.hg/
94 00770 ../push/.hg/
95 00660 ../push/.hg/00changelog.i
95 00660 ../push/.hg/00changelog.i
96 00660 ../push/.hg/requires
96 00660 ../push/.hg/requires
97 00770 ../push/.hg/store/
97 00770 ../push/.hg/store/
98
98
99 $ umask 077
99 $ umask 077
100 $ hg -q push ../push
100 $ hg -q push ../push
101
101
102 after push
102 after push
103 group can still write everything
103 group can still write everything
104
104
105 $ python ../printmodes.py ../push
105 $ python ../printmodes.py ../push
106 00770 ../push/.hg/
106 00770 ../push/.hg/
107 00660 ../push/.hg/00changelog.i
107 00660 ../push/.hg/00changelog.i
108 00660 ../push/.hg/branchheads.cache
108 00770 ../push/.hg/cache/
109 00660 ../push/.hg/cache/branchheads
109 00660 ../push/.hg/requires
110 00660 ../push/.hg/requires
110 00770 ../push/.hg/store/
111 00770 ../push/.hg/store/
111 00660 ../push/.hg/store/00changelog.i
112 00660 ../push/.hg/store/00changelog.i
112 00660 ../push/.hg/store/00manifest.i
113 00660 ../push/.hg/store/00manifest.i
113 00770 ../push/.hg/store/data/
114 00770 ../push/.hg/store/data/
114 00770 ../push/.hg/store/data/dir/
115 00770 ../push/.hg/store/data/dir/
115 00660 ../push/.hg/store/data/dir/bar.i
116 00660 ../push/.hg/store/data/dir/bar.i
116 00660 ../push/.hg/store/data/foo.i
117 00660 ../push/.hg/store/data/foo.i
117 00660 ../push/.hg/store/fncache
118 00660 ../push/.hg/store/fncache
118 00660 ../push/.hg/store/undo
119 00660 ../push/.hg/store/undo
119 00660 ../push/.hg/undo.branch
120 00660 ../push/.hg/undo.branch
120 00660 ../push/.hg/undo.desc
121 00660 ../push/.hg/undo.desc
121 00660 ../push/.hg/undo.dirstate
122 00660 ../push/.hg/undo.dirstate
122
123
123
124
124 Test that we don't lose the setgid bit when we call chmod.
125 Test that we don't lose the setgid bit when we call chmod.
125 Not all systems support setgid directories (e.g. HFS+), so
126 Not all systems support setgid directories (e.g. HFS+), so
126 just check that directories have the same mode.
127 just check that directories have the same mode.
127
128
128 $ cd ..
129 $ cd ..
129 $ hg init setgid
130 $ hg init setgid
130 $ cd setgid
131 $ cd setgid
131 $ chmod g+rwx .hg/store
132 $ chmod g+rwx .hg/store
132 $ chmod g+s .hg/store 2> /dev/null
133 $ chmod g+s .hg/store 2> /dev/null
133 $ mkdir dir
134 $ mkdir dir
134 $ touch dir/file
135 $ touch dir/file
135 $ hg ci -qAm 'add dir/file'
136 $ hg ci -qAm 'add dir/file'
136 $ storemode=`python ../mode.py .hg/store`
137 $ storemode=`python ../mode.py .hg/store`
137 $ dirmode=`python ../mode.py .hg/store/data/dir`
138 $ dirmode=`python ../mode.py .hg/store/data/dir`
138 $ if [ "$storemode" != "$dirmode" ]; then
139 $ if [ "$storemode" != "$dirmode" ]; then
139 > echo "$storemode != $dirmode"
140 > echo "$storemode != $dirmode"
140 $ fi
141 $ fi
@@ -1,124 +1,124 b''
1 $ branches=.hg/branchheads.cache
1 $ branches=.hg/cache/branchheads
2 $ echo '[extensions]' >> $HGRCPATH
2 $ echo '[extensions]' >> $HGRCPATH
3 $ echo 'mq =' >> $HGRCPATH
3 $ echo 'mq =' >> $HGRCPATH
4
4
5 $ show_branch_cache()
5 $ show_branch_cache()
6 > {
6 > {
7 > # force cache (re)generation
7 > # force cache (re)generation
8 > hg log -r does-not-exist 2> /dev/null
8 > hg log -r does-not-exist 2> /dev/null
9 > hg log -r tip --template 'tip: {rev}\n'
9 > hg log -r tip --template 'tip: {rev}\n'
10 > if [ -f $branches ]; then
10 > if [ -f $branches ]; then
11 > sort $branches
11 > sort $branches
12 > else
12 > else
13 > echo No branch cache
13 > echo No branch cache
14 > fi
14 > fi
15 > if [ "$1" = 1 ]; then
15 > if [ "$1" = 1 ]; then
16 > for b in foo bar; do
16 > for b in foo bar; do
17 > hg log -r $b --template "branch $b: "'{rev}\n'
17 > hg log -r $b --template "branch $b: "'{rev}\n'
18 > done
18 > done
19 > fi
19 > fi
20 > }
20 > }
21
21
22 $ hg init a
22 $ hg init a
23 $ cd a
23 $ cd a
24 $ hg qinit -c
24 $ hg qinit -c
25
25
26
26
27 mq patch on an empty repo
27 mq patch on an empty repo
28
28
29 $ hg qnew p1
29 $ hg qnew p1
30 $ show_branch_cache
30 $ show_branch_cache
31 tip: 0
31 tip: 0
32 No branch cache
32 No branch cache
33
33
34 $ echo > pfile
34 $ echo > pfile
35 $ hg add pfile
35 $ hg add pfile
36 $ hg qrefresh -m 'patch 1'
36 $ hg qrefresh -m 'patch 1'
37 $ show_branch_cache
37 $ show_branch_cache
38 tip: 0
38 tip: 0
39 No branch cache
39 No branch cache
40
40
41 some regular revisions
41 some regular revisions
42
42
43 $ hg qpop
43 $ hg qpop
44 popping p1
44 popping p1
45 patch queue now empty
45 patch queue now empty
46 $ echo foo > foo
46 $ echo foo > foo
47 $ hg add foo
47 $ hg add foo
48 $ echo foo > .hg/branch
48 $ echo foo > .hg/branch
49 $ hg ci -m 'branch foo'
49 $ hg ci -m 'branch foo'
50
50
51 $ echo bar > bar
51 $ echo bar > bar
52 $ hg add bar
52 $ hg add bar
53 $ echo bar > .hg/branch
53 $ echo bar > .hg/branch
54 $ hg ci -m 'branch bar'
54 $ hg ci -m 'branch bar'
55 $ show_branch_cache
55 $ show_branch_cache
56 tip: 1
56 tip: 1
57 c229711f16da3d7591f89b1b8d963b79bda22714 1
57 c229711f16da3d7591f89b1b8d963b79bda22714 1
58 c229711f16da3d7591f89b1b8d963b79bda22714 bar
58 c229711f16da3d7591f89b1b8d963b79bda22714 bar
59 dc25e3827021582e979f600811852e36cbe57341 foo
59 dc25e3827021582e979f600811852e36cbe57341 foo
60
60
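The dump above shows the on-disk layout of the branch heads cache that this changeset moves to .hg/cache/branchheads: the first line records the tip node and tip revision the cache was computed against, and every following line maps a head node to its branch name, which is why one head each for 'foo' and 'bar' appears once both branch commits exist. A rough reader for that layout, as a sketch only (read_branchheads is a hypothetical helper, not a Mercurial API):

    def read_branchheads(path):
        f = open(path)
        tipnode, tiprev = f.readline().split()             # validity marker: tip at cache time
        heads = {}
        for line in f:
            node, label = line.rstrip('\n').split(' ', 1)  # branch names may contain spaces
            heads.setdefault(label, []).append(node)
        f.close()
        return (tipnode, int(tiprev)), heads
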
61 add some mq patches
61 add some mq patches
62
62
63 $ hg qpush
63 $ hg qpush
64 applying p1
64 applying p1
65 now at: p1
65 now at: p1
66 $ show_branch_cache
66 $ show_branch_cache
67 tip: 2
67 tip: 2
68 c229711f16da3d7591f89b1b8d963b79bda22714 1
68 c229711f16da3d7591f89b1b8d963b79bda22714 1
69 c229711f16da3d7591f89b1b8d963b79bda22714 bar
69 c229711f16da3d7591f89b1b8d963b79bda22714 bar
70 dc25e3827021582e979f600811852e36cbe57341 foo
70 dc25e3827021582e979f600811852e36cbe57341 foo
71
71
72 $ hg qnew p2
72 $ hg qnew p2
73 $ echo foo > .hg/branch
73 $ echo foo > .hg/branch
74 $ echo foo2 >> foo
74 $ echo foo2 >> foo
75 $ hg qrefresh -m 'patch 2'
75 $ hg qrefresh -m 'patch 2'
76 $ show_branch_cache 1
76 $ show_branch_cache 1
77 tip: 3
77 tip: 3
78 c229711f16da3d7591f89b1b8d963b79bda22714 1
78 c229711f16da3d7591f89b1b8d963b79bda22714 1
79 c229711f16da3d7591f89b1b8d963b79bda22714 bar
79 c229711f16da3d7591f89b1b8d963b79bda22714 bar
80 dc25e3827021582e979f600811852e36cbe57341 foo
80 dc25e3827021582e979f600811852e36cbe57341 foo
81 branch foo: 3
81 branch foo: 3
82 branch bar: 2
82 branch bar: 2
83
83
84 removing the cache
84 removing the cache
85
85
86 $ rm $branches
86 $ rm $branches
87 $ show_branch_cache 1
87 $ show_branch_cache 1
88 tip: 3
88 tip: 3
89 c229711f16da3d7591f89b1b8d963b79bda22714 1
89 c229711f16da3d7591f89b1b8d963b79bda22714 1
90 c229711f16da3d7591f89b1b8d963b79bda22714 bar
90 c229711f16da3d7591f89b1b8d963b79bda22714 bar
91 dc25e3827021582e979f600811852e36cbe57341 foo
91 dc25e3827021582e979f600811852e36cbe57341 foo
92 branch foo: 3
92 branch foo: 3
93 branch bar: 2
93 branch bar: 2
94
94
95 importing rev 1 (the cache now ends in one of the patches)
95 importing rev 1 (the cache now ends in one of the patches)
96
96
97 $ hg qimport -r 1 -n p0
97 $ hg qimport -r 1 -n p0
98 $ show_branch_cache 1
98 $ show_branch_cache 1
99 tip: 3
99 tip: 3
100 c229711f16da3d7591f89b1b8d963b79bda22714 1
100 c229711f16da3d7591f89b1b8d963b79bda22714 1
101 c229711f16da3d7591f89b1b8d963b79bda22714 bar
101 c229711f16da3d7591f89b1b8d963b79bda22714 bar
102 dc25e3827021582e979f600811852e36cbe57341 foo
102 dc25e3827021582e979f600811852e36cbe57341 foo
103 branch foo: 3
103 branch foo: 3
104 branch bar: 2
104 branch bar: 2
105 $ hg log -r qbase --template 'qbase: {rev}\n'
105 $ hg log -r qbase --template 'qbase: {rev}\n'
106 qbase: 1
106 qbase: 1
107
107
108 detect an invalid cache
108 detect an invalid cache
109
109
110 $ hg qpop -a
110 $ hg qpop -a
111 popping p2
111 popping p2
112 popping p1
112 popping p1
113 popping p0
113 popping p0
114 patch queue now empty
114 patch queue now empty
115 $ hg qpush -a
115 $ hg qpush -a
116 applying p0
116 applying p0
117 applying p1
117 applying p1
118 applying p2
118 applying p2
119 now at: p2
119 now at: p2
120 $ show_branch_cache
120 $ show_branch_cache
121 tip: 3
121 tip: 3
122 dc25e3827021582e979f600811852e36cbe57341 0
122 dc25e3827021582e979f600811852e36cbe57341 0
123 dc25e3827021582e979f600811852e36cbe57341 foo
123 dc25e3827021582e979f600811852e36cbe57341 foo
124
124
@@ -1,1368 +1,1368 @@
1 $ checkundo()
1 $ checkundo()
2 > {
2 > {
3 > if [ -f .hg/store/undo ]; then
3 > if [ -f .hg/store/undo ]; then
4 > echo ".hg/store/undo still exists after $1"
4 > echo ".hg/store/undo still exists after $1"
5 > fi
5 > fi
6 > }
6 > }
7
7
8 $ echo "[extensions]" >> $HGRCPATH
8 $ echo "[extensions]" >> $HGRCPATH
9 $ echo "mq=" >> $HGRCPATH
9 $ echo "mq=" >> $HGRCPATH
10
10
11 $ echo "[mq]" >> $HGRCPATH
11 $ echo "[mq]" >> $HGRCPATH
12 $ echo "plain=true" >> $HGRCPATH
12 $ echo "plain=true" >> $HGRCPATH
13
13
14
14
15 help
15 help
16
16
17 $ hg help mq
17 $ hg help mq
18 mq extension - manage a stack of patches
18 mq extension - manage a stack of patches
19
19
20 This extension lets you work with a stack of patches in a Mercurial
20 This extension lets you work with a stack of patches in a Mercurial
21 repository. It manages two stacks of patches - all known patches, and applied
21 repository. It manages two stacks of patches - all known patches, and applied
22 patches (subset of known patches).
22 patches (subset of known patches).
23
23
24 Known patches are represented as patch files in the .hg/patches directory.
24 Known patches are represented as patch files in the .hg/patches directory.
25 Applied patches are both patch files and changesets.
25 Applied patches are both patch files and changesets.
26
26
27 Common tasks (use "hg help command" for more details):
27 Common tasks (use "hg help command" for more details):
28
28
29 create new patch qnew
29 create new patch qnew
30 import existing patch qimport
30 import existing patch qimport
31
31
32 print patch series qseries
32 print patch series qseries
33 print applied patches qapplied
33 print applied patches qapplied
34
34
35 add known patch to applied stack qpush
35 add known patch to applied stack qpush
36 remove patch from applied stack qpop
36 remove patch from applied stack qpop
37 refresh contents of top applied patch qrefresh
37 refresh contents of top applied patch qrefresh
38
38
39 By default, mq will automatically use git patches when required to avoid
39 By default, mq will automatically use git patches when required to avoid
40 losing file mode changes, copy records, binary files or empty files creations
40 losing file mode changes, copy records, binary files or empty files creations
41 or deletions. This behaviour can be configured with:
41 or deletions. This behaviour can be configured with:
42
42
43 [mq]
43 [mq]
44 git = auto/keep/yes/no
44 git = auto/keep/yes/no
45
45
46 If set to 'keep', mq will obey the [diff] section configuration while
46 If set to 'keep', mq will obey the [diff] section configuration while
47 preserving existing git patches upon qrefresh. If set to 'yes' or 'no', mq
47 preserving existing git patches upon qrefresh. If set to 'yes' or 'no', mq
48 will override the [diff] section and always generate git or regular patches,
48 will override the [diff] section and always generate git or regular patches,
49 possibly losing data in the second case.
49 possibly losing data in the second case.
50
50
51 You will by default be managing a patch queue named "patches". You can create
51 You will by default be managing a patch queue named "patches". You can create
52 other, independent patch queues with the "hg qqueue" command.
52 other, independent patch queues with the "hg qqueue" command.
53
53
54 list of commands:
54 list of commands:
55
55
56 qapplied print the patches already applied
56 qapplied print the patches already applied
57 qclone clone main and patch repository at same time
57 qclone clone main and patch repository at same time
58 qdelete remove patches from queue
58 qdelete remove patches from queue
59 qdiff diff of the current patch and subsequent modifications
59 qdiff diff of the current patch and subsequent modifications
60 qfinish move applied patches into repository history
60 qfinish move applied patches into repository history
61 qfold fold the named patches into the current patch
61 qfold fold the named patches into the current patch
62 qgoto push or pop patches until named patch is at top of stack
62 qgoto push or pop patches until named patch is at top of stack
63 qguard set or print guards for a patch
63 qguard set or print guards for a patch
64 qheader print the header of the topmost or specified patch
64 qheader print the header of the topmost or specified patch
65 qimport import a patch
65 qimport import a patch
66 qnew create a new patch
66 qnew create a new patch
67 qnext print the name of the next patch
67 qnext print the name of the next patch
68 qpop pop the current patch off the stack
68 qpop pop the current patch off the stack
69 qprev print the name of the previous patch
69 qprev print the name of the previous patch
70 qpush push the next patch onto the stack
70 qpush push the next patch onto the stack
71 qqueue manage multiple patch queues
71 qqueue manage multiple patch queues
72 qrefresh update the current patch
72 qrefresh update the current patch
73 qrename rename a patch
73 qrename rename a patch
74 qselect set or print guarded patches to push
74 qselect set or print guarded patches to push
75 qseries print the entire series file
75 qseries print the entire series file
76 qtop print the name of the current patch
76 qtop print the name of the current patch
77 qunapplied print the patches not yet applied
77 qunapplied print the patches not yet applied
78 strip strip changesets and all their descendants from the repository
78 strip strip changesets and all their descendants from the repository
79
79
80 use "hg -v help mq" to show builtin aliases and global options
80 use "hg -v help mq" to show builtin aliases and global options
81
81
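The [mq] git option documented in the help text above is set through normal hgrc configuration; for instance, a repository that wants existing git-format patches preserved on qrefresh while otherwise following the [diff] section could use (illustration only, not part of the test):

    [mq]
    git = keep
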
82 $ hg init a
82 $ hg init a
83 $ cd a
83 $ cd a
84 $ echo a > a
84 $ echo a > a
85 $ hg ci -Ama
85 $ hg ci -Ama
86 adding a
86 adding a
87
87
88 $ hg clone . ../k
88 $ hg clone . ../k
89 updating to branch default
89 updating to branch default
90 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
91
91
92 $ mkdir b
92 $ mkdir b
93 $ echo z > b/z
93 $ echo z > b/z
94 $ hg ci -Ama
94 $ hg ci -Ama
95 adding b/z
95 adding b/z
96
96
97
97
98 qinit
98 qinit
99
99
100 $ hg qinit
100 $ hg qinit
101
101
102 $ cd ..
102 $ cd ..
103 $ hg init b
103 $ hg init b
104
104
105
105
106 -R qinit
106 -R qinit
107
107
108 $ hg -R b qinit
108 $ hg -R b qinit
109
109
110 $ hg init c
110 $ hg init c
111
111
112
112
113 qinit -c
113 qinit -c
114
114
115 $ hg --cwd c qinit -c
115 $ hg --cwd c qinit -c
116 $ hg -R c/.hg/patches st
116 $ hg -R c/.hg/patches st
117 A .hgignore
117 A .hgignore
118 A series
118 A series
119
119
120
120
121 qinit; qinit -c
121 qinit; qinit -c
122
122
123 $ hg init d
123 $ hg init d
124 $ cd d
124 $ cd d
125 $ hg qinit
125 $ hg qinit
126 $ hg qinit -c
126 $ hg qinit -c
127
127
128 qinit -c should create both files if they don't exist
128 qinit -c should create both files if they don't exist
129
129
130 $ cat .hg/patches/.hgignore
130 $ cat .hg/patches/.hgignore
131 ^\.hg
131 ^\.hg
132 ^\.mq
132 ^\.mq
133 syntax: glob
133 syntax: glob
134 status
134 status
135 guards
135 guards
136 $ cat .hg/patches/series
136 $ cat .hg/patches/series
137 $ hg qinit -c
137 $ hg qinit -c
138 abort: repository $TESTTMP/d/.hg/patches already exists!
138 abort: repository $TESTTMP/d/.hg/patches already exists!
139 [255]
139 [255]
140 $ cd ..
140 $ cd ..
141
141
142 $ echo '% qinit; <stuff>; qinit -c'
142 $ echo '% qinit; <stuff>; qinit -c'
143 % qinit; <stuff>; qinit -c
143 % qinit; <stuff>; qinit -c
144 $ hg init e
144 $ hg init e
145 $ cd e
145 $ cd e
146 $ hg qnew A
146 $ hg qnew A
147 $ checkundo qnew
147 $ checkundo qnew
148 $ echo foo > foo
148 $ echo foo > foo
149 $ hg add foo
149 $ hg add foo
150 $ hg qrefresh
150 $ hg qrefresh
151 $ hg qnew B
151 $ hg qnew B
152 $ echo >> foo
152 $ echo >> foo
153 $ hg qrefresh
153 $ hg qrefresh
154 $ echo status >> .hg/patches/.hgignore
154 $ echo status >> .hg/patches/.hgignore
155 $ echo bleh >> .hg/patches/.hgignore
155 $ echo bleh >> .hg/patches/.hgignore
156 $ hg qinit -c
156 $ hg qinit -c
157 adding .hg/patches/A
157 adding .hg/patches/A
158 adding .hg/patches/B
158 adding .hg/patches/B
159 $ hg -R .hg/patches status
159 $ hg -R .hg/patches status
160 A .hgignore
160 A .hgignore
161 A A
161 A A
162 A B
162 A B
163 A series
163 A series
164
164
165 qinit -c shouldn't touch these files if they already exist
165 qinit -c shouldn't touch these files if they already exist
166
166
167 $ cat .hg/patches/.hgignore
167 $ cat .hg/patches/.hgignore
168 status
168 status
169 bleh
169 bleh
170 $ cat .hg/patches/series
170 $ cat .hg/patches/series
171 A
171 A
172 B
172 B
173
173
174 add an untracked file
174 add an untracked file
175
175
176 $ echo >> .hg/patches/flaf
176 $ echo >> .hg/patches/flaf
177
177
178 status --mq with color (issue2096)
178 status --mq with color (issue2096)
179
179
180 $ hg status --mq --config extensions.color= --color=always
180 $ hg status --mq --config extensions.color= --color=always
181 \x1b[0;32;1mA .hgignore\x1b[0m (esc)
181 \x1b[0;32;1mA .hgignore\x1b[0m (esc)
182 \x1b[0;32;1mA A\x1b[0m (esc)
182 \x1b[0;32;1mA A\x1b[0m (esc)
183 \x1b[0;32;1mA B\x1b[0m (esc)
183 \x1b[0;32;1mA B\x1b[0m (esc)
184 \x1b[0;32;1mA series\x1b[0m (esc)
184 \x1b[0;32;1mA series\x1b[0m (esc)
185 \x1b[0;35;1;4m? flaf\x1b[0m (esc)
185 \x1b[0;35;1;4m? flaf\x1b[0m (esc)
186
186
187 try the --mq option on a command provided by an extension
187 try the --mq option on a command provided by an extension
188
188
189 $ hg purge --mq --verbose --config extensions.purge=
189 $ hg purge --mq --verbose --config extensions.purge=
190 Removing file flaf
190 Removing file flaf
191
191
192 $ cd ..
192 $ cd ..
193
193
194 init --mq without repo
194 init --mq without repo
195
195
196 $ mkdir f
196 $ mkdir f
197 $ cd f
197 $ cd f
198 $ hg init --mq
198 $ hg init --mq
199 abort: there is no Mercurial repository here (.hg not found)
199 abort: there is no Mercurial repository here (.hg not found)
200 [255]
200 [255]
201 $ cd ..
201 $ cd ..
202
202
203 init --mq with repo path
203 init --mq with repo path
204
204
205 $ hg init g
205 $ hg init g
206 $ hg init --mq g
206 $ hg init --mq g
207 $ test -d g/.hg/patches/.hg
207 $ test -d g/.hg/patches/.hg
208
208
209 init --mq with nonexistent directory
209 init --mq with nonexistent directory
210
210
211 $ hg init --mq nonexistentdir
211 $ hg init --mq nonexistentdir
212 abort: repository nonexistentdir not found!
212 abort: repository nonexistentdir not found!
213 [255]
213 [255]
214
214
215
215
216 init --mq with bundle (non "local")
216 init --mq with bundle (non "local")
217
217
218 $ hg -R a bundle --all a.bundle >/dev/null
218 $ hg -R a bundle --all a.bundle >/dev/null
219 $ hg init --mq a.bundle
219 $ hg init --mq a.bundle
220 abort: only a local queue repository may be initialized
220 abort: only a local queue repository may be initialized
221 [255]
221 [255]
222
222
223 $ cd a
223 $ cd a
224
224
225 $ hg qnew -m 'foo bar' test.patch
225 $ hg qnew -m 'foo bar' test.patch
226
226
227 $ echo '# comment' > .hg/patches/series.tmp
227 $ echo '# comment' > .hg/patches/series.tmp
228 $ echo >> .hg/patches/series.tmp # empty line
228 $ echo >> .hg/patches/series.tmp # empty line
229 $ cat .hg/patches/series >> .hg/patches/series.tmp
229 $ cat .hg/patches/series >> .hg/patches/series.tmp
230 $ mv .hg/patches/series.tmp .hg/patches/series
230 $ mv .hg/patches/series.tmp .hg/patches/series
231
231
232
232
233 qrefresh
233 qrefresh
234
234
235 $ echo a >> a
235 $ echo a >> a
236 $ hg qrefresh
236 $ hg qrefresh
237 $ cat .hg/patches/test.patch
237 $ cat .hg/patches/test.patch
238 foo bar
238 foo bar
239
239
240 diff -r [a-f0-9]* a (re)
240 diff -r [a-f0-9]* a (re)
241 --- a/a\t(?P<date>.*) (re)
241 --- a/a\t(?P<date>.*) (re)
242 \+\+\+ b/a\t(?P<date2>.*) (re)
242 \+\+\+ b/a\t(?P<date2>.*) (re)
243 @@ -1,1 +1,2 @@
243 @@ -1,1 +1,2 @@
244 a
244 a
245 +a
245 +a
246
246
247 empty qrefresh
247 empty qrefresh
248
248
249 $ hg qrefresh -X a
249 $ hg qrefresh -X a
250
250
251 revision:
251 revision:
252
252
253 $ hg diff -r -2 -r -1
253 $ hg diff -r -2 -r -1
254
254
255 patch:
255 patch:
256
256
257 $ cat .hg/patches/test.patch
257 $ cat .hg/patches/test.patch
258 foo bar
258 foo bar
259
259
260
260
261 working dir diff:
261 working dir diff:
262
262
263 $ hg diff --nodates -q
263 $ hg diff --nodates -q
264 --- a/a
264 --- a/a
265 +++ b/a
265 +++ b/a
266 @@ -1,1 +1,2 @@
266 @@ -1,1 +1,2 @@
267 a
267 a
268 +a
268 +a
269
269
270 restore things
270 restore things
271
271
272 $ hg qrefresh
272 $ hg qrefresh
273 $ checkundo qrefresh
273 $ checkundo qrefresh
274
274
275
275
276 qpop
276 qpop
277
277
278 $ hg qpop
278 $ hg qpop
279 popping test.patch
279 popping test.patch
280 patch queue now empty
280 patch queue now empty
281 $ checkundo qpop
281 $ checkundo qpop
282
282
283
283
284 qpush with dump of tag cache
284 qpush with dump of tag cache
285 Dump the tag cache to ensure that it has exactly one head after qpush.
285 Dump the tag cache to ensure that it has exactly one head after qpush.
286
286
287 $ rm -f .hg/tags.cache
287 $ rm -f .hg/cache/tags
288 $ hg tags > /dev/null
288 $ hg tags > /dev/null
289
289
290 .hg/tags.cache (pre qpush):
290 .hg/cache/tags (pre qpush):
291
291
292 $ cat .hg/tags.cache
292 $ cat .hg/cache/tags
293 1 [\da-f]{40} (re)
293 1 [\da-f]{40} (re)
294
294
295 $ hg qpush
295 $ hg qpush
296 applying test.patch
296 applying test.patch
297 now at: test.patch
297 now at: test.patch
298 $ hg tags > /dev/null
298 $ hg tags > /dev/null
299
299
300 .hg/tags.cache (post qpush):
300 .hg/cache/tags (post qpush):
301
301
302 $ cat .hg/tags.cache
302 $ cat .hg/cache/tags
303 2 [\da-f]{40} (re)
303 2 [\da-f]{40} (re)
304
304
305 $ checkundo qpush
305 $ checkundo qpush
306 $ cd ..
306 $ cd ..
307
307
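The two cat commands above read the file this changeset renames from .hg/tags.cache to .hg/cache/tags. Its leading lines hold one entry per repository head, the head's revision number followed by its node hash, which is why the test expects a single '1 <node>' line before qpush and a single '2 <node>' line afterwards; tag definitions, when present, come later in the file. A rough head counter for that layout, assuming head lines come first and any tag entries are separated from them by a blank line (count_cached_heads is a hypothetical helper, not Mercurial's API):

    def count_cached_heads(path):
        n = 0
        for line in open(path):
            if not line.strip():     # blank line ends the head section
                break
            n += 1                   # each head line: "<rev> <node> [<.hgtags filenode>]"
        return n
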
308
308
309 pop/push outside repo
309 pop/push outside repo
310 $ hg -R a qpop
310 $ hg -R a qpop
311 popping test.patch
311 popping test.patch
312 patch queue now empty
312 patch queue now empty
313 $ hg -R a qpush
313 $ hg -R a qpush
314 applying test.patch
314 applying test.patch
315 now at: test.patch
315 now at: test.patch
316
316
317 $ cd a
317 $ cd a
318 $ hg qnew test2.patch
318 $ hg qnew test2.patch
319
319
320 qrefresh in subdir
320 qrefresh in subdir
321
321
322 $ cd b
322 $ cd b
323 $ echo a > a
323 $ echo a > a
324 $ hg add a
324 $ hg add a
325 $ hg qrefresh
325 $ hg qrefresh
326
326
327 pop/push -a in subdir
327 pop/push -a in subdir
328
328
329 $ hg qpop -a
329 $ hg qpop -a
330 popping test2.patch
330 popping test2.patch
331 popping test.patch
331 popping test.patch
332 patch queue now empty
332 patch queue now empty
333 $ hg --traceback qpush -a
333 $ hg --traceback qpush -a
334 applying test.patch
334 applying test.patch
335 applying test2.patch
335 applying test2.patch
336 now at: test2.patch
336 now at: test2.patch
337
337
338
338
339 setting columns & formatted tests truncating (issue1912)
339 setting columns & formatted tests truncating (issue1912)
340
340
341 $ COLUMNS=4 hg qseries --config ui.formatted=true
341 $ COLUMNS=4 hg qseries --config ui.formatted=true
342 test.patch
342 test.patch
343 test2.patch
343 test2.patch
344 $ COLUMNS=20 hg qseries --config ui.formatted=true -vs
344 $ COLUMNS=20 hg qseries --config ui.formatted=true -vs
345 0 A test.patch: f...
345 0 A test.patch: f...
346 1 A test2.patch:
346 1 A test2.patch:
347 $ hg qpop
347 $ hg qpop
348 popping test2.patch
348 popping test2.patch
349 now at: test.patch
349 now at: test.patch
350 $ hg qseries -vs
350 $ hg qseries -vs
351 0 A test.patch: foo bar
351 0 A test.patch: foo bar
352 1 U test2.patch:
352 1 U test2.patch:
353 $ hg sum | grep mq
353 $ hg sum | grep mq
354 mq: 1 applied, 1 unapplied
354 mq: 1 applied, 1 unapplied
355 $ hg qpush
355 $ hg qpush
356 applying test2.patch
356 applying test2.patch
357 now at: test2.patch
357 now at: test2.patch
358 $ hg sum | grep mq
358 $ hg sum | grep mq
359 mq: 2 applied
359 mq: 2 applied
360 $ hg qapplied
360 $ hg qapplied
361 test.patch
361 test.patch
362 test2.patch
362 test2.patch
363 $ hg qtop
363 $ hg qtop
364 test2.patch
364 test2.patch
365
365
366
366
367 prev
367 prev
368
368
369 $ hg qapp -1
369 $ hg qapp -1
370 test.patch
370 test.patch
371
371
372 next
372 next
373
373
374 $ hg qunapp -1
374 $ hg qunapp -1
375 all patches applied
375 all patches applied
376 [1]
376 [1]
377
377
378 $ hg qpop
378 $ hg qpop
379 popping test2.patch
379 popping test2.patch
380 now at: test.patch
380 now at: test.patch
381
381
382 commit should fail
382 commit should fail
383
383
384 $ hg commit
384 $ hg commit
385 abort: cannot commit over an applied mq patch
385 abort: cannot commit over an applied mq patch
386 [255]
386 [255]
387
387
388 push should fail
388 push should fail
389
389
390 $ hg push ../../k
390 $ hg push ../../k
391 pushing to ../../k
391 pushing to ../../k
392 abort: source has mq patches applied
392 abort: source has mq patches applied
393 [255]
393 [255]
394
394
395
395
396 import should fail
396 import should fail
397
397
398 $ hg st .
398 $ hg st .
399 $ echo foo >> ../a
399 $ echo foo >> ../a
400 $ hg diff > ../../import.diff
400 $ hg diff > ../../import.diff
401 $ hg revert --no-backup ../a
401 $ hg revert --no-backup ../a
402 $ hg import ../../import.diff
402 $ hg import ../../import.diff
403 abort: cannot import over an applied patch
403 abort: cannot import over an applied patch
404 [255]
404 [255]
405 $ hg st
405 $ hg st
406
406
407 import --no-commit should succeed
407 import --no-commit should succeed
408
408
409 $ hg import --no-commit ../../import.diff
409 $ hg import --no-commit ../../import.diff
410 applying ../../import.diff
410 applying ../../import.diff
411 $ hg st
411 $ hg st
412 M a
412 M a
413 $ hg revert --no-backup ../a
413 $ hg revert --no-backup ../a
414
414
415
415
416 qunapplied
416 qunapplied
417
417
418 $ hg qunapplied
418 $ hg qunapplied
419 test2.patch
419 test2.patch
420
420
421
421
422 qpush/qpop with index
422 qpush/qpop with index
423
423
424 $ hg qnew test1b.patch
424 $ hg qnew test1b.patch
425 $ echo 1b > 1b
425 $ echo 1b > 1b
426 $ hg add 1b
426 $ hg add 1b
427 $ hg qrefresh
427 $ hg qrefresh
428 $ hg qpush 2
428 $ hg qpush 2
429 applying test2.patch
429 applying test2.patch
430 now at: test2.patch
430 now at: test2.patch
431 $ hg qpop 0
431 $ hg qpop 0
432 popping test2.patch
432 popping test2.patch
433 popping test1b.patch
433 popping test1b.patch
434 now at: test.patch
434 now at: test.patch
435 $ hg qpush test.patch+1
435 $ hg qpush test.patch+1
436 applying test1b.patch
436 applying test1b.patch
437 now at: test1b.patch
437 now at: test1b.patch
438 $ hg qpush test.patch+2
438 $ hg qpush test.patch+2
439 applying test2.patch
439 applying test2.patch
440 now at: test2.patch
440 now at: test2.patch
441 $ hg qpop test2.patch-1
441 $ hg qpop test2.patch-1
442 popping test2.patch
442 popping test2.patch
443 now at: test1b.patch
443 now at: test1b.patch
444 $ hg qpop test2.patch-2
444 $ hg qpop test2.patch-2
445 popping test1b.patch
445 popping test1b.patch
446 now at: test.patch
446 now at: test.patch
447 $ hg qpush test1b.patch+1
447 $ hg qpush test1b.patch+1
448 applying test1b.patch
448 applying test1b.patch
449 applying test2.patch
449 applying test2.patch
450 now at: test2.patch
450 now at: test2.patch
451
451
452
452
453 qpush --move
453 qpush --move
454
454
455 $ hg qpop -a
455 $ hg qpop -a
456 popping test2.patch
456 popping test2.patch
457 popping test1b.patch
457 popping test1b.patch
458 popping test.patch
458 popping test.patch
459 patch queue now empty
459 patch queue now empty
460 $ hg qguard test1b.patch -- -negguard
460 $ hg qguard test1b.patch -- -negguard
461 $ hg qguard test2.patch -- +posguard
461 $ hg qguard test2.patch -- +posguard
462 $ hg qpush --move test2.patch # can't move guarded patch
462 $ hg qpush --move test2.patch # can't move guarded patch
463 cannot push 'test2.patch' - guarded by ['+posguard']
463 cannot push 'test2.patch' - guarded by ['+posguard']
464 [1]
464 [1]
465 $ hg qselect posguard
465 $ hg qselect posguard
466 number of unguarded, unapplied patches has changed from 2 to 3
466 number of unguarded, unapplied patches has changed from 2 to 3
467 $ hg qpush --move test2.patch # move to front
467 $ hg qpush --move test2.patch # move to front
468 applying test2.patch
468 applying test2.patch
469 now at: test2.patch
469 now at: test2.patch
470 $ hg qpush --move test1b.patch # negative guard unselected
470 $ hg qpush --move test1b.patch # negative guard unselected
471 applying test1b.patch
471 applying test1b.patch
472 now at: test1b.patch
472 now at: test1b.patch
473 $ hg qpush --move test.patch # noop move
473 $ hg qpush --move test.patch # noop move
474 applying test.patch
474 applying test.patch
475 now at: test.patch
475 now at: test.patch
476 $ hg qseries -v
476 $ hg qseries -v
477 0 A test2.patch
477 0 A test2.patch
478 1 A test1b.patch
478 1 A test1b.patch
479 2 A test.patch
479 2 A test.patch
480 $ hg qpop -a
480 $ hg qpop -a
481 popping test.patch
481 popping test.patch
482 popping test1b.patch
482 popping test1b.patch
483 popping test2.patch
483 popping test2.patch
484 patch queue now empty
484 patch queue now empty
485
485
486 cleaning up
486 cleaning up
487
487
488 $ hg qselect --none
488 $ hg qselect --none
489 guards deactivated
489 guards deactivated
490 number of unguarded, unapplied patches has changed from 3 to 2
490 number of unguarded, unapplied patches has changed from 3 to 2
491 $ hg qguard --none test1b.patch
491 $ hg qguard --none test1b.patch
492 $ hg qguard --none test2.patch
492 $ hg qguard --none test2.patch
493 $ hg qpush --move test.patch
493 $ hg qpush --move test.patch
494 applying test.patch
494 applying test.patch
495 now at: test.patch
495 now at: test.patch
496 $ hg qpush --move test1b.patch
496 $ hg qpush --move test1b.patch
497 applying test1b.patch
497 applying test1b.patch
498 now at: test1b.patch
498 now at: test1b.patch
499 $ hg qpush --move bogus # nonexistent patch
499 $ hg qpush --move bogus # nonexistent patch
500 abort: patch bogus not in series
500 abort: patch bogus not in series
501 [255]
501 [255]
502 $ hg qpush --move # no patch
502 $ hg qpush --move # no patch
503 abort: please specify the patch to move
503 abort: please specify the patch to move
504 [255]
504 [255]
505 $ hg qpush --move test.patch # already applied
505 $ hg qpush --move test.patch # already applied
506 abort: cannot push to a previous patch: test.patch
506 abort: cannot push to a previous patch: test.patch
507 [255]
507 [255]
508 $ hg qpush
508 $ hg qpush
509 applying test2.patch
509 applying test2.patch
510 now at: test2.patch
510 now at: test2.patch
511
511
512
512
513 series after move
513 series after move
514
514
515 $ cat `hg root`/.hg/patches/series
515 $ cat `hg root`/.hg/patches/series
516 test.patch
516 test.patch
517 test1b.patch
517 test1b.patch
518 test2.patch
518 test2.patch
519 # comment
519 # comment
520
520
521
521
522
522
523 pop, qapplied, qunapplied
523 pop, qapplied, qunapplied
524
524
525 $ hg qseries -v
525 $ hg qseries -v
526 0 A test.patch
526 0 A test.patch
527 1 A test1b.patch
527 1 A test1b.patch
528 2 A test2.patch
528 2 A test2.patch
529
529
530 qapplied -1 test.patch
530 qapplied -1 test.patch
531
531
532 $ hg qapplied -1 test.patch
532 $ hg qapplied -1 test.patch
533 only one patch applied
533 only one patch applied
534 [1]
534 [1]
535
535
536 qapplied -1 test1b.patch
536 qapplied -1 test1b.patch
537
537
538 $ hg qapplied -1 test1b.patch
538 $ hg qapplied -1 test1b.patch
539 test.patch
539 test.patch
540
540
541 qapplied -1 test2.patch
541 qapplied -1 test2.patch
542
542
543 $ hg qapplied -1 test2.patch
543 $ hg qapplied -1 test2.patch
544 test1b.patch
544 test1b.patch
545
545
546 qapplied -1
546 qapplied -1
547
547
548 $ hg qapplied -1
548 $ hg qapplied -1
549 test1b.patch
549 test1b.patch
550
550
551 qapplied
551 qapplied
552
552
553 $ hg qapplied
553 $ hg qapplied
554 test.patch
554 test.patch
555 test1b.patch
555 test1b.patch
556 test2.patch
556 test2.patch
557
557
558 qapplied test1b.patch
558 qapplied test1b.patch
559
559
560 $ hg qapplied test1b.patch
560 $ hg qapplied test1b.patch
561 test.patch
561 test.patch
562 test1b.patch
562 test1b.patch
563
563
564 qunapplied -1
564 qunapplied -1
565
565
566 $ hg qunapplied -1
566 $ hg qunapplied -1
567 all patches applied
567 all patches applied
568 [1]
568 [1]
569
569
570 qunapplied
570 qunapplied
571
571
572 $ hg qunapplied
572 $ hg qunapplied
573
573
574 popping
574 popping
575
575
576 $ hg qpop
576 $ hg qpop
577 popping test2.patch
577 popping test2.patch
578 now at: test1b.patch
578 now at: test1b.patch
579
579
580 qunapplied -1
580 qunapplied -1
581
581
582 $ hg qunapplied -1
582 $ hg qunapplied -1
583 test2.patch
583 test2.patch
584
584
585 qunapplied
585 qunapplied
586
586
587 $ hg qunapplied
587 $ hg qunapplied
588 test2.patch
588 test2.patch
589
589
590 qunapplied test2.patch
590 qunapplied test2.patch
591
591
592 $ hg qunapplied test2.patch
592 $ hg qunapplied test2.patch
593
593
594 qunapplied -1 test2.patch
594 qunapplied -1 test2.patch
595
595
596 $ hg qunapplied -1 test2.patch
596 $ hg qunapplied -1 test2.patch
597 all patches applied
597 all patches applied
598 [1]
598 [1]
599
599
600 popping -a
600 popping -a
601
601
602 $ hg qpop -a
602 $ hg qpop -a
603 popping test1b.patch
603 popping test1b.patch
604 popping test.patch
604 popping test.patch
605 patch queue now empty
605 patch queue now empty
606
606
607 qapplied
607 qapplied
608
608
609 $ hg qapplied
609 $ hg qapplied
610
610
611 qapplied -1
611 qapplied -1
612
612
613 $ hg qapplied -1
613 $ hg qapplied -1
614 no patches applied
614 no patches applied
615 [1]
615 [1]
616 $ hg qpush
616 $ hg qpush
617 applying test.patch
617 applying test.patch
618 now at: test.patch
618 now at: test.patch
619
619
620
620
621 push should succeed
621 push should succeed
622
622
623 $ hg qpop -a
623 $ hg qpop -a
624 popping test.patch
624 popping test.patch
625 patch queue now empty
625 patch queue now empty
626 $ hg push ../../k
626 $ hg push ../../k
627 pushing to ../../k
627 pushing to ../../k
628 searching for changes
628 searching for changes
629 adding changesets
629 adding changesets
630 adding manifests
630 adding manifests
631 adding file changes
631 adding file changes
632 added 1 changesets with 1 changes to 1 files
632 added 1 changesets with 1 changes to 1 files
633
633
634
634
635 we want to start with some patches applied
635 we want to start with some patches applied
636
636
637 $ hg qpush -a
637 $ hg qpush -a
638 applying test.patch
638 applying test.patch
639 applying test1b.patch
639 applying test1b.patch
640 applying test2.patch
640 applying test2.patch
641 now at: test2.patch
641 now at: test2.patch
642
642
643 % pops all patches and succeeds
643 % pops all patches and succeeds
644
644
645 $ hg qpop -a
645 $ hg qpop -a
646 popping test2.patch
646 popping test2.patch
647 popping test1b.patch
647 popping test1b.patch
648 popping test.patch
648 popping test.patch
649 patch queue now empty
649 patch queue now empty
650
650
651 % does nothing and succeeds
651 % does nothing and succeeds
652
652
653 $ hg qpop -a
653 $ hg qpop -a
654 no patches applied
654 no patches applied
655
655
656 % fails - nothing else to pop
656 % fails - nothing else to pop
657
657
658 $ hg qpop
658 $ hg qpop
659 no patches applied
659 no patches applied
660 [1]
660 [1]
661
661
662 % pushes a patch and succeeds
662 % pushes a patch and succeeds
663
663
664 $ hg qpush
664 $ hg qpush
665 applying test.patch
665 applying test.patch
666 now at: test.patch
666 now at: test.patch
667
667
668 % pops a patch and succeeds
668 % pops a patch and succeeds
669
669
670 $ hg qpop
670 $ hg qpop
671 popping test.patch
671 popping test.patch
672 patch queue now empty
672 patch queue now empty
673
673
674 % pushes up to test1b.patch and succeeds
674 % pushes up to test1b.patch and succeeds
675
675
676 $ hg qpush test1b.patch
676 $ hg qpush test1b.patch
677 applying test.patch
677 applying test.patch
678 applying test1b.patch
678 applying test1b.patch
679 now at: test1b.patch
679 now at: test1b.patch
680
680
681 % does nothing and succeeds
681 % does nothing and succeeds
682
682
683 $ hg qpush test1b.patch
683 $ hg qpush test1b.patch
684 qpush: test1b.patch is already at the top
684 qpush: test1b.patch is already at the top
685
685
686 % does nothing and succeeds
686 % does nothing and succeeds
687
687
688 $ hg qpop test1b.patch
688 $ hg qpop test1b.patch
689 qpop: test1b.patch is already at the top
689 qpop: test1b.patch is already at the top
690
690
691 % fails - can't push to this patch
691 % fails - can't push to this patch
692
692
693 $ hg qpush test.patch
693 $ hg qpush test.patch
694 abort: cannot push to a previous patch: test.patch
694 abort: cannot push to a previous patch: test.patch
695 [255]
695 [255]
696
696
697 % fails - can't pop to this patch
697 % fails - can't pop to this patch
698
698
699 $ hg qpop test2.patch
699 $ hg qpop test2.patch
700 abort: patch test2.patch is not applied
700 abort: patch test2.patch is not applied
701 [255]
701 [255]
702
702
703 % pops up to test.patch and succeeds
703 % pops up to test.patch and succeeds
704
704
705 $ hg qpop test.patch
705 $ hg qpop test.patch
706 popping test1b.patch
706 popping test1b.patch
707 now at: test.patch
707 now at: test.patch
708
708
709 % pushes all patches and succeeds
709 % pushes all patches and succeeds
710
710
711 $ hg qpush -a
711 $ hg qpush -a
712 applying test1b.patch
712 applying test1b.patch
713 applying test2.patch
713 applying test2.patch
714 now at: test2.patch
714 now at: test2.patch
715
715
716 % does nothing and succeeds
716 % does nothing and succeeds
717
717
718 $ hg qpush -a
718 $ hg qpush -a
719 all patches are currently applied
719 all patches are currently applied
720
720
721 % fails - nothing else to push
721 % fails - nothing else to push
722
722
723 $ hg qpush
723 $ hg qpush
724 patch series already fully applied
724 patch series already fully applied
725 [1]
725 [1]
726
726
727 % does nothing and succeeds
727 % does nothing and succeeds
728
728
729 $ hg qpush test2.patch
729 $ hg qpush test2.patch
730 qpush: test2.patch is already at the top
730 qpush: test2.patch is already at the top
731
731
732 strip
732 strip
733
733
734 $ cd ../../b
734 $ cd ../../b
735 $ echo x>x
735 $ echo x>x
736 $ hg ci -Ama
736 $ hg ci -Ama
737 adding x
737 adding x
738 $ hg strip tip
738 $ hg strip tip
739 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
739 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
740 saved backup bundle to $TESTTMP/b/.hg/strip-backup/*-backup.hg (glob)
740 saved backup bundle to $TESTTMP/b/.hg/strip-backup/*-backup.hg (glob)
741 $ hg unbundle .hg/strip-backup/*
741 $ hg unbundle .hg/strip-backup/*
742 adding changesets
742 adding changesets
743 adding manifests
743 adding manifests
744 adding file changes
744 adding file changes
745 added 1 changesets with 1 changes to 1 files
745 added 1 changesets with 1 changes to 1 files
746 (run 'hg update' to get a working copy)
746 (run 'hg update' to get a working copy)
747
747
748
748
749 strip with local changes, should complain
749 strip with local changes, should complain
750
750
751 $ hg up
751 $ hg up
752 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
752 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
753 $ echo y>y
753 $ echo y>y
754 $ hg add y
754 $ hg add y
755 $ hg strip tip
755 $ hg strip tip
756 abort: local changes found
756 abort: local changes found
757 [255]
757 [255]
758
758
759 --force strip with local changes
759 --force strip with local changes
760
760
761 $ hg strip -f tip
761 $ hg strip -f tip
762 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
762 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
763 saved backup bundle to $TESTTMP/b/.hg/strip-backup/*-backup.hg (glob)
763 saved backup bundle to $TESTTMP/b/.hg/strip-backup/*-backup.hg (glob)
764
764
765
765
766 cd b; hg qrefresh
766 cd b; hg qrefresh
767
767
768 $ hg init refresh
768 $ hg init refresh
769 $ cd refresh
769 $ cd refresh
770 $ echo a > a
770 $ echo a > a
771 $ hg ci -Ama
771 $ hg ci -Ama
772 adding a
772 adding a
773 $ hg qnew -mfoo foo
773 $ hg qnew -mfoo foo
774 $ echo a >> a
774 $ echo a >> a
775 $ hg qrefresh
775 $ hg qrefresh
776 $ mkdir b
776 $ mkdir b
777 $ cd b
777 $ cd b
778 $ echo f > f
778 $ echo f > f
779 $ hg add f
779 $ hg add f
780 $ hg qrefresh
780 $ hg qrefresh
781 $ cat ../.hg/patches/foo
781 $ cat ../.hg/patches/foo
782 foo
782 foo
783
783
784 diff -r cb9a9f314b8b a
784 diff -r cb9a9f314b8b a
785 --- a/a\t(?P<date>.*) (re)
785 --- a/a\t(?P<date>.*) (re)
786 \+\+\+ b/a\t(?P<date>.*) (re)
786 \+\+\+ b/a\t(?P<date>.*) (re)
787 @@ -1,1 +1,2 @@
787 @@ -1,1 +1,2 @@
788 a
788 a
789 +a
789 +a
790 diff -r cb9a9f314b8b b/f
790 diff -r cb9a9f314b8b b/f
791 --- /dev/null\t(?P<date>.*) (re)
791 --- /dev/null\t(?P<date>.*) (re)
792 \+\+\+ b/b/f\t(?P<date>.*) (re)
792 \+\+\+ b/b/f\t(?P<date>.*) (re)
793 @@ -0,0 +1,1 @@
793 @@ -0,0 +1,1 @@
794 +f
794 +f
795
795
796 hg qrefresh .
796 hg qrefresh .
797
797
798 $ hg qrefresh .
798 $ hg qrefresh .
799 $ cat ../.hg/patches/foo
799 $ cat ../.hg/patches/foo
800 foo
800 foo
801
801
802 diff -r cb9a9f314b8b b/f
802 diff -r cb9a9f314b8b b/f
803 --- /dev/null\t(?P<date>.*) (re)
803 --- /dev/null\t(?P<date>.*) (re)
804 \+\+\+ b/b/f\t(?P<date>.*) (re)
804 \+\+\+ b/b/f\t(?P<date>.*) (re)
805 @@ -0,0 +1,1 @@
805 @@ -0,0 +1,1 @@
806 +f
806 +f
807 $ hg status
807 $ hg status
808 M a
808 M a
809
809
810
810
811 qpush failure
811 qpush failure
812
812
813 $ cd ..
813 $ cd ..
814 $ hg qrefresh
814 $ hg qrefresh
815 $ hg qnew -mbar bar
815 $ hg qnew -mbar bar
816 $ echo foo > foo
816 $ echo foo > foo
817 $ echo bar > bar
817 $ echo bar > bar
818 $ hg add foo bar
818 $ hg add foo bar
819 $ hg qrefresh
819 $ hg qrefresh
820 $ hg qpop -a
820 $ hg qpop -a
821 popping bar
821 popping bar
822 popping foo
822 popping foo
823 patch queue now empty
823 patch queue now empty
824 $ echo bar > foo
824 $ echo bar > foo
825 $ hg qpush -a
825 $ hg qpush -a
826 applying foo
826 applying foo
827 applying bar
827 applying bar
828 file foo already exists
828 file foo already exists
829 1 out of 1 hunks FAILED -- saving rejects to file foo.rej
829 1 out of 1 hunks FAILED -- saving rejects to file foo.rej
830 patch failed, unable to continue (try -v)
830 patch failed, unable to continue (try -v)
831 patch failed, rejects left in working dir
831 patch failed, rejects left in working dir
832 errors during apply, please fix and refresh bar
832 errors during apply, please fix and refresh bar
833 [2]
833 [2]
834 $ hg st
834 $ hg st
835 ? foo
835 ? foo
836 ? foo.rej
836 ? foo.rej
837
837
838
838
839 mq tags
839 mq tags
840
840
841 $ hg log --template '{rev} {tags}\n' -r qparent:qtip
841 $ hg log --template '{rev} {tags}\n' -r qparent:qtip
842 0 qparent
842 0 qparent
843 1 foo qbase
843 1 foo qbase
844 2 bar qtip tip
844 2 bar qtip tip
845
845
846
846
847 bad node in status
847 bad node in status
848
848
849 $ hg qpop
849 $ hg qpop
850 popping bar
850 popping bar
851 now at: foo
851 now at: foo
852 $ hg strip -qn tip
852 $ hg strip -qn tip
853 $ hg tip
853 $ hg tip
854 changeset: 0:cb9a9f314b8b
854 changeset: 0:cb9a9f314b8b
855 tag: tip
855 tag: tip
856 user: test
856 user: test
857 date: Thu Jan 01 00:00:00 1970 +0000
857 date: Thu Jan 01 00:00:00 1970 +0000
858 summary: a
858 summary: a
859
859
860 $ hg branches
860 $ hg branches
861 default 0:cb9a9f314b8b
861 default 0:cb9a9f314b8b
862 $ hg qpop
862 $ hg qpop
863 no patches applied
863 no patches applied
864 [1]
864 [1]
865
865
866 $ cat >>$HGRCPATH <<EOF
866 $ cat >>$HGRCPATH <<EOF
867 > [diff]
867 > [diff]
868 > git = True
868 > git = True
869 > EOF
869 > EOF
870 $ cd ..
870 $ cd ..
871 $ hg init git
871 $ hg init git
872 $ cd git
872 $ cd git
873 $ hg qinit
873 $ hg qinit
874
874
875 $ hg qnew -m'new file' new
875 $ hg qnew -m'new file' new
876 $ echo foo > new
876 $ echo foo > new
877 $ chmod +x new
877 $ chmod +x new
878 $ hg add new
878 $ hg add new
879 $ hg qrefresh
879 $ hg qrefresh
880 $ cat .hg/patches/new
880 $ cat .hg/patches/new
881 new file
881 new file
882
882
883 diff --git a/new b/new
883 diff --git a/new b/new
884 new file mode 100755
884 new file mode 100755
885 --- /dev/null
885 --- /dev/null
886 +++ b/new
886 +++ b/new
887 @@ -0,0 +1,1 @@
887 @@ -0,0 +1,1 @@
888 +foo
888 +foo
889
889
890 $ hg qnew -m'copy file' copy
890 $ hg qnew -m'copy file' copy
891 $ hg cp new copy
891 $ hg cp new copy
892 $ hg qrefresh
892 $ hg qrefresh
893 $ cat .hg/patches/copy
893 $ cat .hg/patches/copy
894 copy file
894 copy file
895
895
896 diff --git a/new b/copy
896 diff --git a/new b/copy
897 copy from new
897 copy from new
898 copy to copy
898 copy to copy
899
899
900 $ hg qpop
900 $ hg qpop
901 popping copy
901 popping copy
902 now at: new
902 now at: new
903 $ hg qpush
903 $ hg qpush
904 applying copy
904 applying copy
905 now at: copy
905 now at: copy
906 $ hg qdiff
906 $ hg qdiff
907 diff --git a/new b/copy
907 diff --git a/new b/copy
908 copy from new
908 copy from new
909 copy to copy
909 copy to copy
910 $ cat >>$HGRCPATH <<EOF
910 $ cat >>$HGRCPATH <<EOF
911 > [diff]
911 > [diff]
912 > git = False
912 > git = False
913 > EOF
913 > EOF
914 $ hg qdiff --git
914 $ hg qdiff --git
915 diff --git a/new b/copy
915 diff --git a/new b/copy
916 copy from new
916 copy from new
917 copy to copy
917 copy to copy
918 $ cd ..
918 $ cd ..
919
919
920
920
921 test file addition in slow path
921 test file addition in slow path
922
922
923 $ hg init slow
923 $ hg init slow
924 $ cd slow
924 $ cd slow
925 $ hg qinit
925 $ hg qinit
926 $ echo foo > foo
926 $ echo foo > foo
927 $ hg add foo
927 $ hg add foo
928 $ hg ci -m 'add foo'
928 $ hg ci -m 'add foo'
929 $ hg qnew bar
929 $ hg qnew bar
930 $ echo bar > bar
930 $ echo bar > bar
931 $ hg add bar
931 $ hg add bar
932 $ hg mv foo baz
932 $ hg mv foo baz
933 $ hg qrefresh --git
933 $ hg qrefresh --git
934 $ hg up -C 0
934 $ hg up -C 0
935 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
935 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
936 $ echo >> foo
936 $ echo >> foo
937 $ hg ci -m 'change foo'
937 $ hg ci -m 'change foo'
938 created new head
938 created new head
939 $ hg up -C 1
939 $ hg up -C 1
940 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
940 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
941 $ hg qrefresh --git
941 $ hg qrefresh --git
942 $ cat .hg/patches/bar
942 $ cat .hg/patches/bar
943 diff --git a/bar b/bar
943 diff --git a/bar b/bar
944 new file mode 100644
944 new file mode 100644
945 --- /dev/null
945 --- /dev/null
946 +++ b/bar
946 +++ b/bar
947 @@ -0,0 +1,1 @@
947 @@ -0,0 +1,1 @@
948 +bar
948 +bar
949 diff --git a/foo b/baz
949 diff --git a/foo b/baz
950 rename from foo
950 rename from foo
951 rename to baz
951 rename to baz
952 $ hg log -v --template '{rev} {file_copies}\n' -r .
952 $ hg log -v --template '{rev} {file_copies}\n' -r .
953 2 baz (foo)
953 2 baz (foo)
954 $ hg qrefresh --git
954 $ hg qrefresh --git
955 $ cat .hg/patches/bar
955 $ cat .hg/patches/bar
956 diff --git a/bar b/bar
956 diff --git a/bar b/bar
957 new file mode 100644
957 new file mode 100644
958 --- /dev/null
958 --- /dev/null
959 +++ b/bar
959 +++ b/bar
960 @@ -0,0 +1,1 @@
960 @@ -0,0 +1,1 @@
961 +bar
961 +bar
962 diff --git a/foo b/baz
962 diff --git a/foo b/baz
963 rename from foo
963 rename from foo
964 rename to baz
964 rename to baz
965 $ hg log -v --template '{rev} {file_copies}\n' -r .
965 $ hg log -v --template '{rev} {file_copies}\n' -r .
966 2 baz (foo)
966 2 baz (foo)
967 $ hg qrefresh
967 $ hg qrefresh
968 $ grep 'diff --git' .hg/patches/bar
968 $ grep 'diff --git' .hg/patches/bar
969 diff --git a/bar b/bar
969 diff --git a/bar b/bar
970 diff --git a/foo b/baz
970 diff --git a/foo b/baz
971
971
972
972
973 test file move chains in the slow path
973 test file move chains in the slow path
974
974
975 $ hg up -C 1
975 $ hg up -C 1
976 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
976 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
977 $ echo >> foo
977 $ echo >> foo
978 $ hg ci -m 'change foo again'
978 $ hg ci -m 'change foo again'
979 $ hg up -C 2
979 $ hg up -C 2
980 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
980 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
981 $ hg mv bar quux
981 $ hg mv bar quux
982 $ hg mv baz bleh
982 $ hg mv baz bleh
983 $ hg qrefresh --git
983 $ hg qrefresh --git
984 $ cat .hg/patches/bar
984 $ cat .hg/patches/bar
985 diff --git a/foo b/bleh
985 diff --git a/foo b/bleh
986 rename from foo
986 rename from foo
987 rename to bleh
987 rename to bleh
988 diff --git a/quux b/quux
988 diff --git a/quux b/quux
989 new file mode 100644
989 new file mode 100644
990 --- /dev/null
990 --- /dev/null
991 +++ b/quux
991 +++ b/quux
992 @@ -0,0 +1,1 @@
992 @@ -0,0 +1,1 @@
993 +bar
993 +bar
994 $ hg log -v --template '{rev} {file_copies}\n' -r .
994 $ hg log -v --template '{rev} {file_copies}\n' -r .
995 3 bleh (foo)
995 3 bleh (foo)
996 $ hg mv quux fred
996 $ hg mv quux fred
997 $ hg mv bleh barney
997 $ hg mv bleh barney
998 $ hg qrefresh --git
998 $ hg qrefresh --git
999 $ cat .hg/patches/bar
999 $ cat .hg/patches/bar
1000 diff --git a/foo b/barney
1000 diff --git a/foo b/barney
1001 rename from foo
1001 rename from foo
1002 rename to barney
1002 rename to barney
1003 diff --git a/fred b/fred
1003 diff --git a/fred b/fred
1004 new file mode 100644
1004 new file mode 100644
1005 --- /dev/null
1005 --- /dev/null
1006 +++ b/fred
1006 +++ b/fred
1007 @@ -0,0 +1,1 @@
1007 @@ -0,0 +1,1 @@
1008 +bar
1008 +bar
1009 $ hg log -v --template '{rev} {file_copies}\n' -r .
1009 $ hg log -v --template '{rev} {file_copies}\n' -r .
1010 3 barney (foo)
1010 3 barney (foo)
1011
1011
1012
1012
1013 refresh omitting an added file
1013 refresh omitting an added file
1014
1014
1015 $ hg qnew baz
1015 $ hg qnew baz
1016 $ echo newfile > newfile
1016 $ echo newfile > newfile
1017 $ hg add newfile
1017 $ hg add newfile
1018 $ hg qrefresh
1018 $ hg qrefresh
1019 $ hg st -A newfile
1019 $ hg st -A newfile
1020 C newfile
1020 C newfile
1021 $ hg qrefresh -X newfile
1021 $ hg qrefresh -X newfile
1022 $ hg st -A newfile
1022 $ hg st -A newfile
1023 A newfile
1023 A newfile
1024 $ hg revert newfile
1024 $ hg revert newfile
1025 $ rm newfile
1025 $ rm newfile
1026 $ hg qpop
1026 $ hg qpop
1027 popping baz
1027 popping baz
1028 now at: bar
1028 now at: bar
1029 $ hg qdel baz
1029 $ hg qdel baz
1030
1030
1031
1031
1032 create a git patch
1032 create a git patch
1033
1033
1034 $ echo a > alexander
1034 $ echo a > alexander
1035 $ hg add alexander
1035 $ hg add alexander
1036 $ hg qnew -f --git addalexander
1036 $ hg qnew -f --git addalexander
1037 $ grep diff .hg/patches/addalexander
1037 $ grep diff .hg/patches/addalexander
1038 diff --git a/alexander b/alexander
1038 diff --git a/alexander b/alexander
1039
1039
1040
1040
1041 create a git binary patch
1041 create a git binary patch
1042
1042
1043 $ cat > writebin.py <<EOF
1043 $ cat > writebin.py <<EOF
1044 > import sys
1044 > import sys
1045 > path = sys.argv[1]
1045 > path = sys.argv[1]
1046 > open(path, 'wb').write('BIN\x00ARY')
1046 > open(path, 'wb').write('BIN\x00ARY')
1047 > EOF
1047 > EOF
1048 $ python writebin.py bucephalus
1048 $ python writebin.py bucephalus
1049
1049
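The NUL byte that writebin.py embeds is what makes Mercurial treat bucephalus as binary content, so the --git patch created below has to carry a git-style binary hunk instead of a plain text hunk. The decision is roughly the following, shown here only as a sketch (looks_binary is an illustrative name, not Mercurial's API):

    def looks_binary(data):
        # a text diff cannot represent NUL bytes, so their presence forces binary handling
        return '\0' in data
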
1050 $ python "$TESTDIR/md5sum.py" bucephalus
1050 $ python "$TESTDIR/md5sum.py" bucephalus
1051 8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
1051 8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
1052 $ hg add bucephalus
1052 $ hg add bucephalus
1053 $ hg qnew -f --git addbucephalus
1053 $ hg qnew -f --git addbucephalus
1054 $ grep diff .hg/patches/addbucephalus
1054 $ grep diff .hg/patches/addbucephalus
1055 diff --git a/bucephalus b/bucephalus
1055 diff --git a/bucephalus b/bucephalus
1056
1056
1057
1057
1058 check binary patches can be popped and pushed
1058 check binary patches can be popped and pushed
1059
1059
1060 $ hg qpop
1060 $ hg qpop
1061 popping addbucephalus
1061 popping addbucephalus
1062 now at: addalexander
1062 now at: addalexander
1063 $ test -f bucephalus && echo % bucephalus should not be there
1063 $ test -f bucephalus && echo % bucephalus should not be there
1064 [1]
1064 [1]
1065 $ hg qpush
1065 $ hg qpush
1066 applying addbucephalus
1066 applying addbucephalus
1067 now at: addbucephalus
1067 now at: addbucephalus
1068 $ test -f bucephalus
1068 $ test -f bucephalus
1069 $ python "$TESTDIR/md5sum.py" bucephalus
1069 $ python "$TESTDIR/md5sum.py" bucephalus
1070 8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
1070 8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
1071
1071
1072
1072
1073
1073
1074 strip again
1074 strip again
1075
1075
1076 $ cd ..
1076 $ cd ..
1077 $ hg init strip
1077 $ hg init strip
1078 $ cd strip
1078 $ cd strip
1079 $ touch foo
1079 $ touch foo
1080 $ hg add foo
1080 $ hg add foo
1081 $ hg ci -m 'add foo'
1081 $ hg ci -m 'add foo'
1082 $ echo >> foo
1082 $ echo >> foo
1083 $ hg ci -m 'change foo 1'
1083 $ hg ci -m 'change foo 1'
1084 $ hg up -C 0
1084 $ hg up -C 0
1085 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1085 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1086 $ echo 1 >> foo
1086 $ echo 1 >> foo
1087 $ hg ci -m 'change foo 2'
1087 $ hg ci -m 'change foo 2'
1088 created new head
1088 created new head
1089 $ HGMERGE=true hg merge
1089 $ HGMERGE=true hg merge
1090 merging foo
1090 merging foo
1091 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
1091 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
1092 (branch merge, don't forget to commit)
1092 (branch merge, don't forget to commit)
1093 $ hg ci -m merge
1093 $ hg ci -m merge
1094 $ hg log
1094 $ hg log
1095 changeset: 3:99615015637b
1095 changeset: 3:99615015637b
1096 tag: tip
1096 tag: tip
1097 parent: 2:20cbbe65cff7
1097 parent: 2:20cbbe65cff7
1098 parent: 1:d2871fc282d4
1098 parent: 1:d2871fc282d4
1099 user: test
1099 user: test
1100 date: Thu Jan 01 00:00:00 1970 +0000
1100 date: Thu Jan 01 00:00:00 1970 +0000
1101 summary: merge
1101 summary: merge
1102
1102
1103 changeset: 2:20cbbe65cff7
1103 changeset: 2:20cbbe65cff7
1104 parent: 0:53245c60e682
1104 parent: 0:53245c60e682
1105 user: test
1105 user: test
1106 date: Thu Jan 01 00:00:00 1970 +0000
1106 date: Thu Jan 01 00:00:00 1970 +0000
1107 summary: change foo 2
1107 summary: change foo 2
1108
1108
1109 changeset: 1:d2871fc282d4
1109 changeset: 1:d2871fc282d4
1110 user: test
1110 user: test
1111 date: Thu Jan 01 00:00:00 1970 +0000
1111 date: Thu Jan 01 00:00:00 1970 +0000
1112 summary: change foo 1
1112 summary: change foo 1
1113
1113
1114 changeset: 0:53245c60e682
1114 changeset: 0:53245c60e682
1115 user: test
1115 user: test
1116 date: Thu Jan 01 00:00:00 1970 +0000
1116 date: Thu Jan 01 00:00:00 1970 +0000
1117 summary: add foo
1117 summary: add foo
1118
1118
1119 $ hg strip 1
1119 $ hg strip 1
1120 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1120 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1121 saved backup bundle to $TESTTMP/b/strip/.hg/strip-backup/*-backup.hg (glob)
1121 saved backup bundle to $TESTTMP/b/strip/.hg/strip-backup/*-backup.hg (glob)
1122 $ checkundo strip
1122 $ checkundo strip
1123 $ hg log
1123 $ hg log
1124 changeset: 1:20cbbe65cff7
1124 changeset: 1:20cbbe65cff7
1125 tag: tip
1125 tag: tip
1126 user: test
1126 user: test
1127 date: Thu Jan 01 00:00:00 1970 +0000
1127 date: Thu Jan 01 00:00:00 1970 +0000
1128 summary: change foo 2
1128 summary: change foo 2
1129
1129
1130 changeset: 0:53245c60e682
1130 changeset: 0:53245c60e682
1131 user: test
1131 user: test
1132 date: Thu Jan 01 00:00:00 1970 +0000
1132 date: Thu Jan 01 00:00:00 1970 +0000
1133 summary: add foo
1133 summary: add foo
1134
1134
1135 $ cd ..
1135 $ cd ..
1136
1136
1137
1137
1138 qclone
1138 qclone
1139
1139
1140 $ qlog()
1140 $ qlog()
1141 > {
1141 > {
1142 > echo 'main repo:'
1142 > echo 'main repo:'
1143 > hg log --template ' rev {rev}: {desc}\n'
1143 > hg log --template ' rev {rev}: {desc}\n'
1144 > echo 'patch repo:'
1144 > echo 'patch repo:'
1145 > hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
1145 > hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
1146 > }
1146 > }
1147 $ hg init qclonesource
1147 $ hg init qclonesource
1148 $ cd qclonesource
1148 $ cd qclonesource
1149 $ echo foo > foo
1149 $ echo foo > foo
1150 $ hg add foo
1150 $ hg add foo
1151 $ hg ci -m 'add foo'
1151 $ hg ci -m 'add foo'
1152 $ hg qinit
1152 $ hg qinit
1153 $ hg qnew patch1
1153 $ hg qnew patch1
1154 $ echo bar >> foo
1154 $ echo bar >> foo
1155 $ hg qrefresh -m 'change foo'
1155 $ hg qrefresh -m 'change foo'
1156 $ cd ..
1156 $ cd ..
1157
1157
1158
1158
1159 repo with unversioned patch dir
1159 repo with unversioned patch dir
1160
1160
1161 $ hg qclone qclonesource failure
1161 $ hg qclone qclonesource failure
1162 abort: versioned patch repository not found (see init --mq)
1162 abort: versioned patch repository not found (see init --mq)
1163 [255]
1163 [255]
1164
1164
1165 $ cd qclonesource
1165 $ cd qclonesource
1166 $ hg qinit -c
1166 $ hg qinit -c
1167 adding .hg/patches/patch1
1167 adding .hg/patches/patch1
1168 $ hg qci -m checkpoint
1168 $ hg qci -m checkpoint
1169 $ qlog
1169 $ qlog
1170 main repo:
1170 main repo:
1171 rev 1: change foo
1171 rev 1: change foo
1172 rev 0: add foo
1172 rev 0: add foo
1173 patch repo:
1173 patch repo:
1174 rev 0: checkpoint
1174 rev 0: checkpoint
1175 $ cd ..
1175 $ cd ..
1176
1176
1177
1177
1178 repo with patches applied
1178 repo with patches applied
1179
1179
1180 $ hg qclone qclonesource qclonedest
1180 $ hg qclone qclonesource qclonedest
1181 updating to branch default
1181 updating to branch default
1182 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1182 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1183 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1183 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1184 $ cd qclonedest
1184 $ cd qclonedest
1185 $ qlog
1185 $ qlog
1186 main repo:
1186 main repo:
1187 rev 0: add foo
1187 rev 0: add foo
1188 patch repo:
1188 patch repo:
1189 rev 0: checkpoint
1189 rev 0: checkpoint
1190 $ cd ..
1190 $ cd ..
1191
1191
1192
1192
1193 repo with patches unapplied
1193 repo with patches unapplied
1194
1194
1195 $ cd qclonesource
1195 $ cd qclonesource
1196 $ hg qpop -a
1196 $ hg qpop -a
1197 popping patch1
1197 popping patch1
1198 patch queue now empty
1198 patch queue now empty
1199 $ qlog
1199 $ qlog
1200 main repo:
1200 main repo:
1201 rev 0: add foo
1201 rev 0: add foo
1202 patch repo:
1202 patch repo:
1203 rev 0: checkpoint
1203 rev 0: checkpoint
1204 $ cd ..
1204 $ cd ..
1205 $ hg qclone qclonesource qclonedest2
1205 $ hg qclone qclonesource qclonedest2
1206 updating to branch default
1206 updating to branch default
1207 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1207 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1208 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1208 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1209 $ cd qclonedest2
1209 $ cd qclonedest2
1210 $ qlog
1210 $ qlog
1211 main repo:
1211 main repo:
1212 rev 0: add foo
1212 rev 0: add foo
1213 patch repo:
1213 patch repo:
1214 rev 0: checkpoint
1214 rev 0: checkpoint
1215 $ cd ..
1215 $ cd ..
1216
1216
1217
1217
1218 Issue1033: test applying on an empty file
1218 Issue1033: test applying on an empty file
1219
1219
1220 $ hg init empty
1220 $ hg init empty
1221 $ cd empty
1221 $ cd empty
1222 $ touch a
1222 $ touch a
1223 $ hg ci -Am addempty
1223 $ hg ci -Am addempty
1224 adding a
1224 adding a
1225 $ echo a > a
1225 $ echo a > a
1226 $ hg qnew -f -e changea
1226 $ hg qnew -f -e changea
1227 $ hg qpop
1227 $ hg qpop
1228 popping changea
1228 popping changea
1229 patch queue now empty
1229 patch queue now empty
1230 $ hg qpush
1230 $ hg qpush
1231 applying changea
1231 applying changea
1232 now at: changea
1232 now at: changea
1233 $ cd ..
1233 $ cd ..
1234
1234
1235
1235
1236 test qpush with --force, issue1087
1236 test qpush with --force, issue1087
1237
1237
1238 $ hg init forcepush
1238 $ hg init forcepush
1239 $ cd forcepush
1239 $ cd forcepush
1240 $ echo hello > hello.txt
1240 $ echo hello > hello.txt
1241 $ echo bye > bye.txt
1241 $ echo bye > bye.txt
1242 $ hg ci -Ama
1242 $ hg ci -Ama
1243 adding bye.txt
1243 adding bye.txt
1244 adding hello.txt
1244 adding hello.txt
1245 $ hg qnew -d '0 0' empty
1245 $ hg qnew -d '0 0' empty
1246 $ hg qpop
1246 $ hg qpop
1247 popping empty
1247 popping empty
1248 patch queue now empty
1248 patch queue now empty
1249 $ echo world >> hello.txt
1249 $ echo world >> hello.txt
1250
1250
1251
1251
1252 qpush should fail, local changes
1252 qpush should fail, local changes
1253
1253
1254 $ hg qpush
1254 $ hg qpush
1255 abort: local changes found, refresh first
1255 abort: local changes found, refresh first
1256 [255]
1256 [255]
1257
1257
1258
1258
1259 apply force, should not discard changes with empty patch
1259 apply force, should not discard changes with empty patch
1260
1260
1261 $ hg qpush -f
1261 $ hg qpush -f
1262 applying empty
1262 applying empty
1263 patch empty is empty
1263 patch empty is empty
1264 now at: empty
1264 now at: empty
1265 $ hg diff --config diff.nodates=True
1265 $ hg diff --config diff.nodates=True
1266 diff -r bf5fc3f07a0a hello.txt
1266 diff -r bf5fc3f07a0a hello.txt
1267 --- a/hello.txt
1267 --- a/hello.txt
1268 +++ b/hello.txt
1268 +++ b/hello.txt
1269 @@ -1,1 +1,2 @@
1269 @@ -1,1 +1,2 @@
1270 hello
1270 hello
1271 +world
1271 +world
1272 $ hg qdiff --config diff.nodates=True
1272 $ hg qdiff --config diff.nodates=True
1273 diff -r 9ecee4f634e3 hello.txt
1273 diff -r 9ecee4f634e3 hello.txt
1274 --- a/hello.txt
1274 --- a/hello.txt
1275 +++ b/hello.txt
1275 +++ b/hello.txt
1276 @@ -1,1 +1,2 @@
1276 @@ -1,1 +1,2 @@
1277 hello
1277 hello
1278 +world
1278 +world
1279 $ hg log -l1 -p
1279 $ hg log -l1 -p
1280 changeset: 1:bf5fc3f07a0a
1280 changeset: 1:bf5fc3f07a0a
1281 tag: empty
1281 tag: empty
1282 tag: qbase
1282 tag: qbase
1283 tag: qtip
1283 tag: qtip
1284 tag: tip
1284 tag: tip
1285 user: test
1285 user: test
1286 date: Thu Jan 01 00:00:00 1970 +0000
1286 date: Thu Jan 01 00:00:00 1970 +0000
1287 summary: imported patch empty
1287 summary: imported patch empty
1288
1288
1289
1289
1290 $ hg qref -d '0 0'
1290 $ hg qref -d '0 0'
1291 $ hg qpop
1291 $ hg qpop
1292 popping empty
1292 popping empty
1293 patch queue now empty
1293 patch queue now empty
1294 $ echo universe >> hello.txt
1294 $ echo universe >> hello.txt
1295 $ echo universe >> bye.txt
1295 $ echo universe >> bye.txt
1296
1296
1297
1297
1298 qpush should fail, local changes
1298 qpush should fail, local changes
1299
1299
1300 $ hg qpush
1300 $ hg qpush
1301 abort: local changes found, refresh first
1301 abort: local changes found, refresh first
1302 [255]
1302 [255]
1303
1303
1304
1304
1305 apply force, should discard changes in hello, but not bye
1305 apply force, should discard changes in hello, but not bye
1306
1306
1307 $ hg qpush -f
1307 $ hg qpush -f
1308 applying empty
1308 applying empty
1309 now at: empty
1309 now at: empty
1310 $ hg st
1310 $ hg st
1311 M bye.txt
1311 M bye.txt
1312 $ hg diff --config diff.nodates=True
1312 $ hg diff --config diff.nodates=True
1313 diff -r ba252371dbc1 bye.txt
1313 diff -r ba252371dbc1 bye.txt
1314 --- a/bye.txt
1314 --- a/bye.txt
1315 +++ b/bye.txt
1315 +++ b/bye.txt
1316 @@ -1,1 +1,2 @@
1316 @@ -1,1 +1,2 @@
1317 bye
1317 bye
1318 +universe
1318 +universe
1319 $ hg qdiff --config diff.nodates=True
1319 $ hg qdiff --config diff.nodates=True
1320 diff -r 9ecee4f634e3 bye.txt
1320 diff -r 9ecee4f634e3 bye.txt
1321 --- a/bye.txt
1321 --- a/bye.txt
1322 +++ b/bye.txt
1322 +++ b/bye.txt
1323 @@ -1,1 +1,2 @@
1323 @@ -1,1 +1,2 @@
1324 bye
1324 bye
1325 +universe
1325 +universe
1326 diff -r 9ecee4f634e3 hello.txt
1326 diff -r 9ecee4f634e3 hello.txt
1327 --- a/hello.txt
1327 --- a/hello.txt
1328 +++ b/hello.txt
1328 +++ b/hello.txt
1329 @@ -1,1 +1,3 @@
1329 @@ -1,1 +1,3 @@
1330 hello
1330 hello
1331 +world
1331 +world
1332 +universe
1332 +universe
1333
1333
1334
1334
1335 test popping revisions not in working dir ancestry
1335 test popping revisions not in working dir ancestry
1336
1336
1337 $ hg qseries -v
1337 $ hg qseries -v
1338 0 A empty
1338 0 A empty
1339 $ hg up qparent
1339 $ hg up qparent
1340 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1340 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1341 $ hg qpop
1341 $ hg qpop
1342 popping empty
1342 popping empty
1343 patch queue now empty
1343 patch queue now empty
1344
1344
1345 $ cd ..
1345 $ cd ..
1346 $ hg init deletion-order
1346 $ hg init deletion-order
1347 $ cd deletion-order
1347 $ cd deletion-order
1348
1348
1349 $ touch a
1349 $ touch a
1350 $ hg ci -Aqm0
1350 $ hg ci -Aqm0
1351
1351
1352 $ hg qnew rename-dir
1352 $ hg qnew rename-dir
1353 $ hg rm a
1353 $ hg rm a
1354 $ hg qrefresh
1354 $ hg qrefresh
1355
1355
1356 $ mkdir a b
1356 $ mkdir a b
1357 $ touch a/a b/b
1357 $ touch a/a b/b
1358 $ hg add -q a b
1358 $ hg add -q a b
1359 $ hg qrefresh
1359 $ hg qrefresh
1360
1360
1361
1361
1362 test popping must remove files added in subdirectories first
1362 test popping must remove files added in subdirectories first
1363
1363
1364 $ hg qpop
1364 $ hg qpop
1365 popping rename-dir
1365 popping rename-dir
1366 patch queue now empty
1366 patch queue now empty
1367 $ cd ..
1367 $ cd ..
1368
1368
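The qpush hunks above exercise MQ's dirty-working-directory guard: a plain `hg qpush` aborts with "local changes found, refresh first", and only `hg qpush -f` proceeds. As a rough illustration (the helper name and the use of `hg status -mard` are assumptions of this sketch, not anything the test defines), a wrapper script could perform the same kind of check before pushing a patch:

    import subprocess

    def has_local_changes(repo="."):
        # True when tracked files are modified, added, removed or deleted --
        # the state that makes a plain `hg qpush` abort in the tests above.
        out = subprocess.check_output(["hg", "-R", repo, "status", "-mard"])
        return bool(out.strip())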
@@ -1,319 +1,319 b''
1 $ branchcache=.hg/branchheads.cache
1 $ branchcache=.hg/cache/branchheads
2
2
3 $ hg init t
3 $ hg init t
4 $ cd t
4 $ cd t
5
5
6 $ hg branches
6 $ hg branches
7 $ echo foo > a
7 $ echo foo > a
8 $ hg add a
8 $ hg add a
9 $ hg ci -m "initial"
9 $ hg ci -m "initial"
10 $ hg branch foo
10 $ hg branch foo
11 marked working directory as branch foo
11 marked working directory as branch foo
12 $ hg branch
12 $ hg branch
13 foo
13 foo
14 $ hg ci -m "add branch name"
14 $ hg ci -m "add branch name"
15 $ hg branch bar
15 $ hg branch bar
16 marked working directory as branch bar
16 marked working directory as branch bar
17 $ hg ci -m "change branch name"
17 $ hg ci -m "change branch name"
18
18
19 Branch shadowing:
19 Branch shadowing:
20
20
21 $ hg branch default
21 $ hg branch default
22 abort: a branch of the same name already exists (use 'hg update' to switch to it)
22 abort: a branch of the same name already exists (use 'hg update' to switch to it)
23 [255]
23 [255]
24
24
25 $ hg branch -f default
25 $ hg branch -f default
26 marked working directory as branch default
26 marked working directory as branch default
27
27
28 $ hg ci -m "clear branch name"
28 $ hg ci -m "clear branch name"
29 created new head
29 created new head
30
30
31 There should be only one default branch head
31 There should be only one default branch head
32
32
33 $ hg heads .
33 $ hg heads .
34 changeset: 3:9d567d0b51f9
34 changeset: 3:9d567d0b51f9
35 tag: tip
35 tag: tip
36 user: test
36 user: test
37 date: Thu Jan 01 00:00:00 1970 +0000
37 date: Thu Jan 01 00:00:00 1970 +0000
38 summary: clear branch name
38 summary: clear branch name
39
39
40
40
41 $ hg co foo
41 $ hg co foo
42 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 $ hg branch
43 $ hg branch
44 foo
44 foo
45 $ echo bleah > a
45 $ echo bleah > a
46 $ hg ci -m "modify a branch"
46 $ hg ci -m "modify a branch"
47
47
48 $ hg merge default
48 $ hg merge default
49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 (branch merge, don't forget to commit)
50 (branch merge, don't forget to commit)
51
51
52 $ hg branch
52 $ hg branch
53 foo
53 foo
54 $ hg ci -m "merge"
54 $ hg ci -m "merge"
55
55
56 $ hg log
56 $ hg log
57 changeset: 5:dc140083783b
57 changeset: 5:dc140083783b
58 branch: foo
58 branch: foo
59 tag: tip
59 tag: tip
60 parent: 4:98d14f698afe
60 parent: 4:98d14f698afe
61 parent: 3:9d567d0b51f9
61 parent: 3:9d567d0b51f9
62 user: test
62 user: test
63 date: Thu Jan 01 00:00:00 1970 +0000
63 date: Thu Jan 01 00:00:00 1970 +0000
64 summary: merge
64 summary: merge
65
65
66 changeset: 4:98d14f698afe
66 changeset: 4:98d14f698afe
67 branch: foo
67 branch: foo
68 parent: 1:0079f24813e2
68 parent: 1:0079f24813e2
69 user: test
69 user: test
70 date: Thu Jan 01 00:00:00 1970 +0000
70 date: Thu Jan 01 00:00:00 1970 +0000
71 summary: modify a branch
71 summary: modify a branch
72
72
73 changeset: 3:9d567d0b51f9
73 changeset: 3:9d567d0b51f9
74 user: test
74 user: test
75 date: Thu Jan 01 00:00:00 1970 +0000
75 date: Thu Jan 01 00:00:00 1970 +0000
76 summary: clear branch name
76 summary: clear branch name
77
77
78 changeset: 2:ed2bbf4e0102
78 changeset: 2:ed2bbf4e0102
79 branch: bar
79 branch: bar
80 user: test
80 user: test
81 date: Thu Jan 01 00:00:00 1970 +0000
81 date: Thu Jan 01 00:00:00 1970 +0000
82 summary: change branch name
82 summary: change branch name
83
83
84 changeset: 1:0079f24813e2
84 changeset: 1:0079f24813e2
85 branch: foo
85 branch: foo
86 user: test
86 user: test
87 date: Thu Jan 01 00:00:00 1970 +0000
87 date: Thu Jan 01 00:00:00 1970 +0000
88 summary: add branch name
88 summary: add branch name
89
89
90 changeset: 0:db01e8ea3388
90 changeset: 0:db01e8ea3388
91 user: test
91 user: test
92 date: Thu Jan 01 00:00:00 1970 +0000
92 date: Thu Jan 01 00:00:00 1970 +0000
93 summary: initial
93 summary: initial
94
94
95 $ hg branches
95 $ hg branches
96 foo 5:dc140083783b
96 foo 5:dc140083783b
97 default 3:9d567d0b51f9 (inactive)
97 default 3:9d567d0b51f9 (inactive)
98 bar 2:ed2bbf4e0102 (inactive)
98 bar 2:ed2bbf4e0102 (inactive)
99
99
100 $ hg branches -q
100 $ hg branches -q
101 foo
101 foo
102 default
102 default
103 bar
103 bar
104
104
105 Test for invalid branch cache:
105 Test for invalid branch cache:
106
106
107 $ hg rollback
107 $ hg rollback
108 rolling back to revision 4 (undo commit)
108 rolling back to revision 4 (undo commit)
109
109
110 $ cp $branchcache .hg/bc-invalid
110 $ cp $branchcache .hg/bc-invalid
111
111
112 $ hg log -r foo
112 $ hg log -r foo
113 changeset: 4:98d14f698afe
113 changeset: 4:98d14f698afe
114 branch: foo
114 branch: foo
115 tag: tip
115 tag: tip
116 parent: 1:0079f24813e2
116 parent: 1:0079f24813e2
117 user: test
117 user: test
118 date: Thu Jan 01 00:00:00 1970 +0000
118 date: Thu Jan 01 00:00:00 1970 +0000
119 summary: modify a branch
119 summary: modify a branch
120
120
121 $ cp .hg/bc-invalid $branchcache
121 $ cp .hg/bc-invalid $branchcache
122
122
123 $ hg --debug log -r foo
123 $ hg --debug log -r foo
124 invalidating branch cache (tip differs)
124 invalidating branch cache (tip differs)
125 changeset: 4:98d14f698afeaff8cb612dcf215ce95e639effc3
125 changeset: 4:98d14f698afeaff8cb612dcf215ce95e639effc3
126 branch: foo
126 branch: foo
127 tag: tip
127 tag: tip
128 parent: 1:0079f24813e2b73a891577c243684c5066347bc8
128 parent: 1:0079f24813e2b73a891577c243684c5066347bc8
129 parent: -1:0000000000000000000000000000000000000000
129 parent: -1:0000000000000000000000000000000000000000
130 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
130 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
131 user: test
131 user: test
132 date: Thu Jan 01 00:00:00 1970 +0000
132 date: Thu Jan 01 00:00:00 1970 +0000
133 files: a
133 files: a
134 extra: branch=foo
134 extra: branch=foo
135 description:
135 description:
136 modify a branch
136 modify a branch
137
137
138
138
139 $ rm $branchcache
139 $ rm $branchcache
140 $ echo corrupted > $branchcache
140 $ echo corrupted > $branchcache
141
141
142 $ hg log -qr foo
142 $ hg log -qr foo
143 4:98d14f698afe
143 4:98d14f698afe
144
144
145 $ cat $branchcache
145 $ cat $branchcache
146 98d14f698afeaff8cb612dcf215ce95e639effc3 4
146 98d14f698afeaff8cb612dcf215ce95e639effc3 4
147 9d567d0b51f9e2068b054e1948e1a927f99b5874 default
147 9d567d0b51f9e2068b054e1948e1a927f99b5874 default
148 98d14f698afeaff8cb612dcf215ce95e639effc3 foo
148 98d14f698afeaff8cb612dcf215ce95e639effc3 foo
149 ed2bbf4e01029020711be82ca905283e883f0e11 bar
149 ed2bbf4e01029020711be82ca905283e883f0e11 bar
150
150
151 Push should update the branch cache:
151 Push should update the branch cache:
152
152
153 $ hg init ../target
153 $ hg init ../target
154
154
155 Pushing just rev 0:
155 Pushing just rev 0:
156
156
157 $ hg push -qr 0 ../target
157 $ hg push -qr 0 ../target
158
158
159 $ cat ../target/$branchcache
159 $ cat ../target/$branchcache
160 db01e8ea3388fd3c7c94e1436ea2bd6a53d581c5 0
160 db01e8ea3388fd3c7c94e1436ea2bd6a53d581c5 0
161 db01e8ea3388fd3c7c94e1436ea2bd6a53d581c5 default
161 db01e8ea3388fd3c7c94e1436ea2bd6a53d581c5 default
162
162
163 Pushing everything:
163 Pushing everything:
164
164
165 $ hg push -qf ../target
165 $ hg push -qf ../target
166
166
167 $ cat ../target/$branchcache
167 $ cat ../target/$branchcache
168 98d14f698afeaff8cb612dcf215ce95e639effc3 4
168 98d14f698afeaff8cb612dcf215ce95e639effc3 4
169 9d567d0b51f9e2068b054e1948e1a927f99b5874 default
169 9d567d0b51f9e2068b054e1948e1a927f99b5874 default
170 98d14f698afeaff8cb612dcf215ce95e639effc3 foo
170 98d14f698afeaff8cb612dcf215ce95e639effc3 foo
171 ed2bbf4e01029020711be82ca905283e883f0e11 bar
171 ed2bbf4e01029020711be82ca905283e883f0e11 bar
172
172
173 Update with no arguments: tipmost revision of the current branch:
173 Update with no arguments: tipmost revision of the current branch:
174
174
175 $ hg up -q -C 0
175 $ hg up -q -C 0
176 $ hg up -q
176 $ hg up -q
177 $ hg id
177 $ hg id
178 9d567d0b51f9
178 9d567d0b51f9
179
179
180 $ hg up -q 1
180 $ hg up -q 1
181 $ hg up -q
181 $ hg up -q
182 $ hg id
182 $ hg id
183 98d14f698afe (foo) tip
183 98d14f698afe (foo) tip
184
184
185 $ hg branch foobar
185 $ hg branch foobar
186 marked working directory as branch foobar
186 marked working directory as branch foobar
187
187
188 $ hg up
188 $ hg up
189 abort: branch foobar not found
189 abort: branch foobar not found
190 [255]
190 [255]
191
191
192 Fastforward merge:
192 Fastforward merge:
193
193
194 $ hg branch ff
194 $ hg branch ff
195 marked working directory as branch ff
195 marked working directory as branch ff
196
196
197 $ echo ff > ff
197 $ echo ff > ff
198 $ hg ci -Am'fast forward'
198 $ hg ci -Am'fast forward'
199 adding ff
199 adding ff
200
200
201 $ hg up foo
201 $ hg up foo
202 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
202 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
203
203
204 $ hg merge ff
204 $ hg merge ff
205 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
205 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
206 (branch merge, don't forget to commit)
206 (branch merge, don't forget to commit)
207
207
208 $ hg branch
208 $ hg branch
209 foo
209 foo
210 $ hg commit -m'Merge ff into foo'
210 $ hg commit -m'Merge ff into foo'
211 created new head
211 created new head
212 $ hg parents
212 $ hg parents
213 changeset: 6:6af8030670c9
213 changeset: 6:6af8030670c9
214 branch: foo
214 branch: foo
215 tag: tip
215 tag: tip
216 user: test
216 user: test
217 date: Thu Jan 01 00:00:00 1970 +0000
217 date: Thu Jan 01 00:00:00 1970 +0000
218 summary: Merge ff into foo
218 summary: Merge ff into foo
219
219
220 $ hg manifest
220 $ hg manifest
221 a
221 a
222 ff
222 ff
223
223
224
224
225 Test merging, add 3 default heads and one test head:
225 Test merging, add 3 default heads and one test head:
226
226
227 $ cd ..
227 $ cd ..
228 $ hg init merges
228 $ hg init merges
229 $ cd merges
229 $ cd merges
230 $ echo a > a
230 $ echo a > a
231 $ hg ci -Ama
231 $ hg ci -Ama
232 adding a
232 adding a
233
233
234 $ echo b > b
234 $ echo b > b
235 $ hg ci -Amb
235 $ hg ci -Amb
236 adding b
236 adding b
237
237
238 $ hg up 0
238 $ hg up 0
239 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
239 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
240 $ echo c > c
240 $ echo c > c
241 $ hg ci -Amc
241 $ hg ci -Amc
242 adding c
242 adding c
243 created new head
243 created new head
244
244
245 $ hg up 0
245 $ hg up 0
246 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
246 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
247 $ echo d > d
247 $ echo d > d
248 $ hg ci -Amd
248 $ hg ci -Amd
249 adding d
249 adding d
250 created new head
250 created new head
251
251
252 $ hg up 0
252 $ hg up 0
253 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
253 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
254 $ hg branch test
254 $ hg branch test
255 marked working directory as branch test
255 marked working directory as branch test
256 $ echo e >> e
256 $ echo e >> e
257 $ hg ci -Ame
257 $ hg ci -Ame
258 adding e
258 adding e
259
259
260 $ hg log
260 $ hg log
261 changeset: 4:3a1e01ed1df4
261 changeset: 4:3a1e01ed1df4
262 branch: test
262 branch: test
263 tag: tip
263 tag: tip
264 parent: 0:cb9a9f314b8b
264 parent: 0:cb9a9f314b8b
265 user: test
265 user: test
266 date: Thu Jan 01 00:00:00 1970 +0000
266 date: Thu Jan 01 00:00:00 1970 +0000
267 summary: e
267 summary: e
268
268
269 changeset: 3:980f7dc84c29
269 changeset: 3:980f7dc84c29
270 parent: 0:cb9a9f314b8b
270 parent: 0:cb9a9f314b8b
271 user: test
271 user: test
272 date: Thu Jan 01 00:00:00 1970 +0000
272 date: Thu Jan 01 00:00:00 1970 +0000
273 summary: d
273 summary: d
274
274
275 changeset: 2:d36c0562f908
275 changeset: 2:d36c0562f908
276 parent: 0:cb9a9f314b8b
276 parent: 0:cb9a9f314b8b
277 user: test
277 user: test
278 date: Thu Jan 01 00:00:00 1970 +0000
278 date: Thu Jan 01 00:00:00 1970 +0000
279 summary: c
279 summary: c
280
280
281 changeset: 1:d2ae7f538514
281 changeset: 1:d2ae7f538514
282 user: test
282 user: test
283 date: Thu Jan 01 00:00:00 1970 +0000
283 date: Thu Jan 01 00:00:00 1970 +0000
284 summary: b
284 summary: b
285
285
286 changeset: 0:cb9a9f314b8b
286 changeset: 0:cb9a9f314b8b
287 user: test
287 user: test
288 date: Thu Jan 01 00:00:00 1970 +0000
288 date: Thu Jan 01 00:00:00 1970 +0000
289 summary: a
289 summary: a
290
290
291 Implicit merge with test branch as parent:
291 Implicit merge with test branch as parent:
292
292
293 $ hg merge
293 $ hg merge
294 abort: branch 'test' has one head - please merge with an explicit rev
294 abort: branch 'test' has one head - please merge with an explicit rev
295 (run 'hg heads' to see all heads)
295 (run 'hg heads' to see all heads)
296 [255]
296 [255]
297 $ hg up -C default
297 $ hg up -C default
298 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
298 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
299
299
300 Implicit merge with default branch as parent:
300 Implicit merge with default branch as parent:
301
301
302 $ hg merge
302 $ hg merge
303 abort: branch 'default' has 3 heads - please merge with an explicit rev
303 abort: branch 'default' has 3 heads - please merge with an explicit rev
304 (run 'hg heads .' to see heads)
304 (run 'hg heads .' to see heads)
305 [255]
305 [255]
306
306
307 3 branch heads, explicit merge required:
307 3 branch heads, explicit merge required:
308
308
309 $ hg merge 2
309 $ hg merge 2
310 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
310 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
311 (branch merge, don't forget to commit)
311 (branch merge, don't forget to commit)
312 $ hg ci -m merge
312 $ hg ci -m merge
313
313
314 2 branch heads, implicit merge works:
314 2 branch heads, implicit merge works:
315
315
316 $ hg merge
316 $ hg merge
317 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
317 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
318 (branch merge, don't forget to commit)
318 (branch merge, don't forget to commit)
319
319
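The hunk above switches the test's $branchcache variable from .hg/branchheads.cache to the new .hg/cache/branchheads location, and the expected `cat` output also documents the file's layout: the first line holds the tip node and tip revision, and every following line pairs a branch head node with its branch name. A minimal parsing sketch keyed to that visible format (the function name and return shape are illustrative assumptions, not Mercurial's own API):

    def read_branchheads(path=".hg/cache/branchheads"):
        # First line: "<tip node> <tip rev>"; remaining lines: "<head node> <branch>".
        heads = {}
        with open(path) as f:
            tipnode, tiprev = f.readline().split()
            for line in f:
                if not line.strip():
                    continue
                node, branch = line.rstrip("\n").split(" ", 1)
                heads.setdefault(branch, []).append(node)
        return (tipnode, int(tiprev)), heads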
@@ -1,157 +1,157 b''
1
1
2 $ cp "$TESTDIR"/printenv.py .
2 $ cp "$TESTDIR"/printenv.py .
3 $ hg clone http://localhost:$HGPORT/ copy
3 $ hg clone http://localhost:$HGPORT/ copy
4 abort: error: Connection refused
4 abort: error: Connection refused
5 [255]
5 [255]
6 $ test -d copy
6 $ test -d copy
7 [1]
7 [1]
8
8
9 This server doesn't do range requests so it's basically only good for
9 This server doesn't do range requests so it's basically only good for
10 one pull
10 one pull
11
11
12 $ cat > dumb.py <<EOF
12 $ cat > dumb.py <<EOF
13 > import BaseHTTPServer, SimpleHTTPServer, os, signal, sys
13 > import BaseHTTPServer, SimpleHTTPServer, os, signal, sys
14 >
14 >
15 > def run(server_class=BaseHTTPServer.HTTPServer,
15 > def run(server_class=BaseHTTPServer.HTTPServer,
16 > handler_class=SimpleHTTPServer.SimpleHTTPRequestHandler):
16 > handler_class=SimpleHTTPServer.SimpleHTTPRequestHandler):
17 > server_address = ('localhost', int(os.environ['HGPORT']))
17 > server_address = ('localhost', int(os.environ['HGPORT']))
18 > httpd = server_class(server_address, handler_class)
18 > httpd = server_class(server_address, handler_class)
19 > httpd.serve_forever()
19 > httpd.serve_forever()
20 >
20 >
21 > signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
21 > signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
22 > run()
22 > run()
23 > EOF
23 > EOF
24 $ python dumb.py 2>/dev/null &
24 $ python dumb.py 2>/dev/null &
25 $ echo $! >> $DAEMON_PIDS
25 $ echo $! >> $DAEMON_PIDS
26 $ mkdir remote
26 $ mkdir remote
27 $ cd remote
27 $ cd remote
28 $ hg init
28 $ hg init
29 $ echo foo > bar
29 $ echo foo > bar
30 $ echo c2 > '.dotfile with spaces'
30 $ echo c2 > '.dotfile with spaces'
31 $ hg add
31 $ hg add
32 adding .dotfile with spaces
32 adding .dotfile with spaces
33 adding bar
33 adding bar
34 $ hg commit -m"test"
34 $ hg commit -m"test"
35 $ hg tip
35 $ hg tip
36 changeset: 0:02770d679fb8
36 changeset: 0:02770d679fb8
37 tag: tip
37 tag: tip
38 user: test
38 user: test
39 date: Thu Jan 01 00:00:00 1970 +0000
39 date: Thu Jan 01 00:00:00 1970 +0000
40 summary: test
40 summary: test
41
41
42 $ cd ..
42 $ cd ..
43 $ hg clone static-http://localhost:$HGPORT/remote local
43 $ hg clone static-http://localhost:$HGPORT/remote local
44 requesting all changes
44 requesting all changes
45 adding changesets
45 adding changesets
46 adding manifests
46 adding manifests
47 adding file changes
47 adding file changes
48 added 1 changesets with 2 changes to 2 files
48 added 1 changesets with 2 changes to 2 files
49 updating to branch default
49 updating to branch default
50 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
51 $ cd local
51 $ cd local
52 $ hg verify
52 $ hg verify
53 checking changesets
53 checking changesets
54 checking manifests
54 checking manifests
55 crosschecking files in changesets and manifests
55 crosschecking files in changesets and manifests
56 checking files
56 checking files
57 2 files, 1 changesets, 2 total revisions
57 2 files, 1 changesets, 2 total revisions
58 $ cat bar
58 $ cat bar
59 foo
59 foo
60 $ cd ../remote
60 $ cd ../remote
61 $ echo baz > quux
61 $ echo baz > quux
62 $ hg commit -A -mtest2
62 $ hg commit -A -mtest2
63 adding quux
63 adding quux
64
64
65 check for HTTP opener failures when cachefile does not exist
65 check for HTTP opener failures when cachefile does not exist
66
66
67 $ rm .hg/*.cache
67 $ rm .hg/cache/*
68 $ cd ../local
68 $ cd ../local
69 $ echo '[hooks]' >> .hg/hgrc
69 $ echo '[hooks]' >> .hg/hgrc
70 $ echo 'changegroup = python ../printenv.py changegroup' >> .hg/hgrc
70 $ echo 'changegroup = python ../printenv.py changegroup' >> .hg/hgrc
71 $ hg pull
71 $ hg pull
72 changegroup hook: HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/remote
72 changegroup hook: HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/remote
73 pulling from static-http://localhost:$HGPORT/remote
73 pulling from static-http://localhost:$HGPORT/remote
74 searching for changes
74 searching for changes
75 adding changesets
75 adding changesets
76 adding manifests
76 adding manifests
77 adding file changes
77 adding file changes
78 added 1 changesets with 1 changes to 1 files
78 added 1 changesets with 1 changes to 1 files
79 (run 'hg update' to get a working copy)
79 (run 'hg update' to get a working copy)
80
80
81 trying to push
81 trying to push
82
82
83 $ hg update
83 $ hg update
84 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 $ echo more foo >> bar
85 $ echo more foo >> bar
86 $ hg commit -m"test"
86 $ hg commit -m"test"
87 $ hg push
87 $ hg push
88 pushing to static-http://localhost:$HGPORT/remote
88 pushing to static-http://localhost:$HGPORT/remote
89 abort: cannot lock static-http repository
89 abort: cannot lock static-http repository
90 [255]
90 [255]
91
91
92 trying clone -r
92 trying clone -r
93
93
94 $ cd ..
94 $ cd ..
95 $ hg clone -r donotexist static-http://localhost:$HGPORT/remote local0
95 $ hg clone -r donotexist static-http://localhost:$HGPORT/remote local0
96 abort: unknown revision 'donotexist'!
96 abort: unknown revision 'donotexist'!
97 [255]
97 [255]
98 $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
98 $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
99 adding changesets
99 adding changesets
100 adding manifests
100 adding manifests
101 adding file changes
101 adding file changes
102 added 1 changesets with 2 changes to 2 files
102 added 1 changesets with 2 changes to 2 files
103 updating to branch default
103 updating to branch default
104 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
104 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
105
105
106 test with "/" URI (issue 747)
106 test with "/" URI (issue 747)
107
107
108 $ hg init
108 $ hg init
109 $ echo a > a
109 $ echo a > a
110 $ hg add a
110 $ hg add a
111 $ hg ci -ma
111 $ hg ci -ma
112 $ hg clone static-http://localhost:$HGPORT/ local2
112 $ hg clone static-http://localhost:$HGPORT/ local2
113 requesting all changes
113 requesting all changes
114 adding changesets
114 adding changesets
115 adding manifests
115 adding manifests
116 adding file changes
116 adding file changes
117 added 1 changesets with 1 changes to 1 files
117 added 1 changesets with 1 changes to 1 files
118 updating to branch default
118 updating to branch default
119 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
119 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
120 $ cd local2
120 $ cd local2
121 $ hg verify
121 $ hg verify
122 checking changesets
122 checking changesets
123 checking manifests
123 checking manifests
124 crosschecking files in changesets and manifests
124 crosschecking files in changesets and manifests
125 checking files
125 checking files
126 1 files, 1 changesets, 1 total revisions
126 1 files, 1 changesets, 1 total revisions
127 $ cat a
127 $ cat a
128 a
128 a
129 $ hg paths
129 $ hg paths
130 default = static-http://localhost:$HGPORT/
130 default = static-http://localhost:$HGPORT/
131
131
132 test with empty repo (issue965)
132 test with empty repo (issue965)
133
133
134 $ cd ..
134 $ cd ..
135 $ hg init remotempty
135 $ hg init remotempty
136 $ hg clone static-http://localhost:$HGPORT/remotempty local3
136 $ hg clone static-http://localhost:$HGPORT/remotempty local3
137 no changes found
137 no changes found
138 updating to branch default
138 updating to branch default
139 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
139 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
140 $ cd local3
140 $ cd local3
141 $ hg verify
141 $ hg verify
142 checking changesets
142 checking changesets
143 checking manifests
143 checking manifests
144 crosschecking files in changesets and manifests
144 crosschecking files in changesets and manifests
145 checking files
145 checking files
146 0 files, 0 changesets, 0 total revisions
146 0 files, 0 changesets, 0 total revisions
147 $ hg paths
147 $ hg paths
148 default = static-http://localhost:$HGPORT/remotempty
148 default = static-http://localhost:$HGPORT/remotempty
149
149
150 test with non-repo
150 test with non-repo
151
151
152 $ cd ..
152 $ cd ..
153 $ mkdir notarepo
153 $ mkdir notarepo
154 $ hg clone static-http://localhost:$HGPORT/notarepo local3
154 $ hg clone static-http://localhost:$HGPORT/notarepo local3
155 abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository!
155 abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository!
156 [255]
156 [255]
157 $ kill $!
157 $ kill $!
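The static-http hunk above now clears .hg/cache/* (instead of .hg/*.cache) before pulling, to verify that a missing cache file does not break the HTTP opener. A tolerant read of an optional cache file usually follows the pattern sketched below; this is a generic illustration of the behaviour being tested, not the code path Mercurial itself uses:

    import errno

    def read_optional_cache(path):
        # Return the cache contents, or None when the file is absent --
        # the situation the test creates with `rm .hg/cache/*`.
        try:
            with open(path) as f:
                return f.read()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return None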
@@ -1,381 +1,381 b''
1 Helper functions:
1 Helper functions:
2
2
3 $ cacheexists() {
3 $ cacheexists() {
4 > [ -f .hg/tags.cache ] && echo "tag cache exists" || echo "no tag cache"
4 > [ -f .hg/cache/tags ] && echo "tag cache exists" || echo "no tag cache"
5 > }
5 > }
6
6
7 $ dumptags() {
7 $ dumptags() {
8 > rev=$1
8 > rev=$1
9 > echo "rev $rev: .hgtags:"
9 > echo "rev $rev: .hgtags:"
10 > hg cat -r$rev .hgtags
10 > hg cat -r$rev .hgtags
11 > }
11 > }
12
12
13 # XXX need to test that the tag cache works when we strip an old head
13 # XXX need to test that the tag cache works when we strip an old head
14 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
14 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
15 # same, but stuff has changed behind tip.
15 # same, but stuff has changed behind tip.
16
16
17 Setup:
17 Setup:
18
18
19 $ hg init t
19 $ hg init t
20 $ cd t
20 $ cd t
21 $ cacheexists
21 $ cacheexists
22 no tag cache
22 no tag cache
23 $ hg id
23 $ hg id
24 000000000000 tip
24 000000000000 tip
25 $ cacheexists
25 $ cacheexists
26 no tag cache
26 no tag cache
27 $ echo a > a
27 $ echo a > a
28 $ hg add a
28 $ hg add a
29 $ hg commit -m "test"
29 $ hg commit -m "test"
30 $ hg co
30 $ hg co
31 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
31 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
32 $ hg identify
32 $ hg identify
33 acb14030fe0a tip
33 acb14030fe0a tip
34 $ cacheexists
34 $ cacheexists
35 tag cache exists
35 tag cache exists
36
36
37 Try corrupting the cache
37 Try corrupting the cache
38
38
39 $ printf 'a b' > .hg/tags.cache
39 $ printf 'a b' > .hg/cache/tags
40 $ hg identify
40 $ hg identify
41 .hg/tags.cache is corrupt, rebuilding it
41 .hg/cache/tags is corrupt, rebuilding it
42 acb14030fe0a tip
42 acb14030fe0a tip
43 $ cacheexists
43 $ cacheexists
44 tag cache exists
44 tag cache exists
45 $ hg identify
45 $ hg identify
46 acb14030fe0a tip
46 acb14030fe0a tip
47
47
48 Create local tag with long name:
48 Create local tag with long name:
49
49
50 $ T=`hg identify --debug --id`
50 $ T=`hg identify --debug --id`
51 $ hg tag -l "This is a local tag with a really long name!"
51 $ hg tag -l "This is a local tag with a really long name!"
52 $ hg tags
52 $ hg tags
53 tip 0:acb14030fe0a
53 tip 0:acb14030fe0a
54 This is a local tag with a really long name! 0:acb14030fe0a
54 This is a local tag with a really long name! 0:acb14030fe0a
55 $ rm .hg/localtags
55 $ rm .hg/localtags
56
56
57 Create a tag behind hg's back:
57 Create a tag behind hg's back:
58
58
59 $ echo "$T first" > .hgtags
59 $ echo "$T first" > .hgtags
60 $ cat .hgtags
60 $ cat .hgtags
61 acb14030fe0a21b60322c440ad2d20cf7685a376 first
61 acb14030fe0a21b60322c440ad2d20cf7685a376 first
62 $ hg add .hgtags
62 $ hg add .hgtags
63 $ hg commit -m "add tags"
63 $ hg commit -m "add tags"
64 $ hg tags
64 $ hg tags
65 tip 1:b9154636be93
65 tip 1:b9154636be93
66 first 0:acb14030fe0a
66 first 0:acb14030fe0a
67 $ hg identify
67 $ hg identify
68 b9154636be93 tip
68 b9154636be93 tip
69
69
70 Repeat with cold tag cache:
70 Repeat with cold tag cache:
71
71
72 $ rm -f .hg/tags.cache
72 $ rm -f .hg/cache/tags
73 $ hg identify
73 $ hg identify
74 b9154636be93 tip
74 b9154636be93 tip
75
75
76 And again, but now unable to write tag cache:
76 And again, but now unable to write tag cache:
77
77
78 $ rm -f .hg/tags.cache
78 $ rm -f .hg/cache/tags
79 $ chmod 555 .hg
79 $ chmod 555 .hg
80 $ hg identify
80 $ hg identify
81 b9154636be93 tip
81 b9154636be93 tip
82 $ chmod 755 .hg
82 $ chmod 755 .hg
83
83
84 Create a branch:
84 Create a branch:
85
85
86 $ echo bb > a
86 $ echo bb > a
87 $ hg status
87 $ hg status
88 M a
88 M a
89 $ hg identify
89 $ hg identify
90 b9154636be93+ tip
90 b9154636be93+ tip
91 $ hg co first
91 $ hg co first
92 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
92 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
93 $ hg id
93 $ hg id
94 acb14030fe0a+ first
94 acb14030fe0a+ first
95 $ hg -v id
95 $ hg -v id
96 acb14030fe0a+ first
96 acb14030fe0a+ first
97 $ hg status
97 $ hg status
98 M a
98 M a
99 $ echo 1 > b
99 $ echo 1 > b
100 $ hg add b
100 $ hg add b
101 $ hg commit -m "branch"
101 $ hg commit -m "branch"
102 created new head
102 created new head
103 $ hg id
103 $ hg id
104 c8edf04160c7 tip
104 c8edf04160c7 tip
105
105
106 Merge the two heads:
106 Merge the two heads:
107
107
108 $ hg merge 1
108 $ hg merge 1
109 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 (branch merge, don't forget to commit)
110 (branch merge, don't forget to commit)
111 $ hg id
111 $ hg id
112 c8edf04160c7+b9154636be93+ tip
112 c8edf04160c7+b9154636be93+ tip
113 $ hg status
113 $ hg status
114 M .hgtags
114 M .hgtags
115 $ hg commit -m "merge"
115 $ hg commit -m "merge"
116
116
117 Create a fake head, make sure tag not visible afterwards:
117 Create a fake head, make sure tag not visible afterwards:
118
118
119 $ cp .hgtags tags
119 $ cp .hgtags tags
120 $ hg tag last
120 $ hg tag last
121 $ hg rm .hgtags
121 $ hg rm .hgtags
122 $ hg commit -m "remove"
122 $ hg commit -m "remove"
123
123
124 $ mv tags .hgtags
124 $ mv tags .hgtags
125 $ hg add .hgtags
125 $ hg add .hgtags
126 $ hg commit -m "readd"
126 $ hg commit -m "readd"
127 $
127 $
128 $ hg tags
128 $ hg tags
129 tip 6:35ff301afafe
129 tip 6:35ff301afafe
130 first 0:acb14030fe0a
130 first 0:acb14030fe0a
131
131
132 Add invalid tags:
132 Add invalid tags:
133
133
134 $ echo "spam" >> .hgtags
134 $ echo "spam" >> .hgtags
135 $ echo >> .hgtags
135 $ echo >> .hgtags
136 $ echo "foo bar" >> .hgtags
136 $ echo "foo bar" >> .hgtags
137 $ echo "a5a5 invalid" >> .hg/localtags
137 $ echo "a5a5 invalid" >> .hg/localtags
138 $ echo "committing .hgtags:"
138 $ echo "committing .hgtags:"
139 committing .hgtags:
139 committing .hgtags:
140 $ cat .hgtags
140 $ cat .hgtags
141 acb14030fe0a21b60322c440ad2d20cf7685a376 first
141 acb14030fe0a21b60322c440ad2d20cf7685a376 first
142 spam
142 spam
143
143
144 foo bar
144 foo bar
145 $ hg commit -m "tags"
145 $ hg commit -m "tags"
146
146
147 Report tag parse error on other head:
147 Report tag parse error on other head:
148
148
149 $ hg up 3
149 $ hg up 3
150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 $ echo 'x y' >> .hgtags
151 $ echo 'x y' >> .hgtags
152 $ hg commit -m "head"
152 $ hg commit -m "head"
153 created new head
153 created new head
154
154
155 $ hg tags
155 $ hg tags
156 .hgtags@75d9f02dfe28, line 2: cannot parse entry
156 .hgtags@75d9f02dfe28, line 2: cannot parse entry
157 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
157 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
158 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
158 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
159 tip 8:c4be69a18c11
159 tip 8:c4be69a18c11
160 first 0:acb14030fe0a
160 first 0:acb14030fe0a
161 $ hg tip
161 $ hg tip
162 changeset: 8:c4be69a18c11
162 changeset: 8:c4be69a18c11
163 tag: tip
163 tag: tip
164 parent: 3:ac5e980c4dc0
164 parent: 3:ac5e980c4dc0
165 user: test
165 user: test
166 date: Thu Jan 01 00:00:00 1970 +0000
166 date: Thu Jan 01 00:00:00 1970 +0000
167 summary: head
167 summary: head
168
168
169
169
170 Test tag precedence rules:
170 Test tag precedence rules:
171
171
172 $ cd ..
172 $ cd ..
173 $ hg init t2
173 $ hg init t2
174 $ cd t2
174 $ cd t2
175 $ echo foo > foo
175 $ echo foo > foo
176 $ hg add foo
176 $ hg add foo
177 $ hg ci -m 'add foo' # rev 0
177 $ hg ci -m 'add foo' # rev 0
178 $ hg tag bar # rev 1
178 $ hg tag bar # rev 1
179 $ echo >> foo
179 $ echo >> foo
180 $ hg ci -m 'change foo 1' # rev 2
180 $ hg ci -m 'change foo 1' # rev 2
181 $ hg up -C 1
181 $ hg up -C 1
182 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
182 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
183 $ hg tag -r 1 -f bar # rev 3
183 $ hg tag -r 1 -f bar # rev 3
184 $ hg up -C 1
184 $ hg up -C 1
185 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
185 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
186 $ echo >> foo
186 $ echo >> foo
187 $ hg ci -m 'change foo 2' # rev 4
187 $ hg ci -m 'change foo 2' # rev 4
188 created new head
188 created new head
189 $ hg tags
189 $ hg tags
190 tip 4:0c192d7d5e6b
190 tip 4:0c192d7d5e6b
191 bar 1:78391a272241
191 bar 1:78391a272241
192
192
193 Repeat in case of cache effects:
193 Repeat in case of cache effects:
194
194
195 $ hg tags
195 $ hg tags
196 tip 4:0c192d7d5e6b
196 tip 4:0c192d7d5e6b
197 bar 1:78391a272241
197 bar 1:78391a272241
198
198
199 Detailed dump of tag info:
199 Detailed dump of tag info:
200
200
201 $ hg heads -q # expect 4, 3, 2
201 $ hg heads -q # expect 4, 3, 2
202 4:0c192d7d5e6b
202 4:0c192d7d5e6b
203 3:6fa450212aeb
203 3:6fa450212aeb
204 2:7a94127795a3
204 2:7a94127795a3
205 $ dumptags 2
205 $ dumptags 2
206 rev 2: .hgtags:
206 rev 2: .hgtags:
207 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
207 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
208 $ dumptags 3
208 $ dumptags 3
209 rev 3: .hgtags:
209 rev 3: .hgtags:
210 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
210 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
211 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
211 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
212 78391a272241d70354aa14c874552cad6b51bb42 bar
212 78391a272241d70354aa14c874552cad6b51bb42 bar
213 $ dumptags 4
213 $ dumptags 4
214 rev 4: .hgtags:
214 rev 4: .hgtags:
215 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
215 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
216
216
217 Dump cache:
217 Dump cache:
218
218
219 $ cat .hg/tags.cache
219 $ cat .hg/cache/tags
220 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
220 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
221 3 6fa450212aeb2a21ed616a54aea39a4a27894cd7 7d3b718c964ef37b89e550ebdafd5789e76ce1b0
221 3 6fa450212aeb2a21ed616a54aea39a4a27894cd7 7d3b718c964ef37b89e550ebdafd5789e76ce1b0
222 2 7a94127795a33c10a370c93f731fd9fea0b79af6 0c04f2a8af31de17fab7422878ee5a2dadbc943d
222 2 7a94127795a33c10a370c93f731fd9fea0b79af6 0c04f2a8af31de17fab7422878ee5a2dadbc943d
223
223
224 78391a272241d70354aa14c874552cad6b51bb42 bar
224 78391a272241d70354aa14c874552cad6b51bb42 bar
225
225
226 Test tag removal:
226 Test tag removal:
227
227
228 $ hg tag --remove bar # rev 5
228 $ hg tag --remove bar # rev 5
229 $ hg tip -vp
229 $ hg tip -vp
230 changeset: 5:5f6e8655b1c7
230 changeset: 5:5f6e8655b1c7
231 tag: tip
231 tag: tip
232 user: test
232 user: test
233 date: Thu Jan 01 00:00:00 1970 +0000
233 date: Thu Jan 01 00:00:00 1970 +0000
234 files: .hgtags
234 files: .hgtags
235 description:
235 description:
236 Removed tag bar
236 Removed tag bar
237
237
238
238
239 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
239 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
240 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
240 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
241 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
241 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
242 @@ -1,1 +1,3 @@
242 @@ -1,1 +1,3 @@
243 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
243 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
244 +78391a272241d70354aa14c874552cad6b51bb42 bar
244 +78391a272241d70354aa14c874552cad6b51bb42 bar
245 +0000000000000000000000000000000000000000 bar
245 +0000000000000000000000000000000000000000 bar
246
246
247 $ hg tags
247 $ hg tags
248 tip 5:5f6e8655b1c7
248 tip 5:5f6e8655b1c7
249 $ hg tags # again, try to expose cache bugs
249 $ hg tags # again, try to expose cache bugs
250 tip 5:5f6e8655b1c7
250 tip 5:5f6e8655b1c7
251
251
252 Remove nonexistent tag:
252 Remove nonexistent tag:
253
253
254 $ hg tag --remove foobar
254 $ hg tag --remove foobar
255 abort: tag 'foobar' does not exist
255 abort: tag 'foobar' does not exist
256 [255]
256 [255]
257 $ hg tip
257 $ hg tip
258 changeset: 5:5f6e8655b1c7
258 changeset: 5:5f6e8655b1c7
259 tag: tip
259 tag: tip
260 user: test
260 user: test
261 date: Thu Jan 01 00:00:00 1970 +0000
261 date: Thu Jan 01 00:00:00 1970 +0000
262 summary: Removed tag bar
262 summary: Removed tag bar
263
263
264
264
265 Undo a tag with rollback:
265 Undo a tag with rollback:
266
266
267 $ hg rollback # destroy rev 5 (restore bar)
267 $ hg rollback # destroy rev 5 (restore bar)
268 rolling back to revision 4 (undo commit)
268 rolling back to revision 4 (undo commit)
269 $ hg tags
269 $ hg tags
270 tip 4:0c192d7d5e6b
270 tip 4:0c192d7d5e6b
271 bar 1:78391a272241
271 bar 1:78391a272241
272 $ hg tags
272 $ hg tags
273 tip 4:0c192d7d5e6b
273 tip 4:0c192d7d5e6b
274 bar 1:78391a272241
274 bar 1:78391a272241
275
275
276 Test tag rank:
276 Test tag rank:
277
277
278 $ cd ..
278 $ cd ..
279 $ hg init t3
279 $ hg init t3
280 $ cd t3
280 $ cd t3
281 $ echo foo > foo
281 $ echo foo > foo
282 $ hg add foo
282 $ hg add foo
283 $ hg ci -m 'add foo' # rev 0
283 $ hg ci -m 'add foo' # rev 0
284 $ hg tag -f bar # rev 1 bar -> 0
284 $ hg tag -f bar # rev 1 bar -> 0
285 $ hg tag -f bar # rev 2 bar -> 1
285 $ hg tag -f bar # rev 2 bar -> 1
286 $ hg tag -fr 0 bar # rev 3 bar -> 0
286 $ hg tag -fr 0 bar # rev 3 bar -> 0
287 $ hg tag -fr 1 bar # rev 4 bar -> 1
287 $ hg tag -fr 1 bar # rev 4 bar -> 1
288 $ hg tag -fr 0 bar # rev 5 bar -> 0
288 $ hg tag -fr 0 bar # rev 5 bar -> 0
289 $ hg tags
289 $ hg tags
290 tip 5:85f05169d91d
290 tip 5:85f05169d91d
291 bar 0:bbd179dfa0a7
291 bar 0:bbd179dfa0a7
292 $ hg co 3
292 $ hg co 3
293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
294 $ echo barbar > foo
294 $ echo barbar > foo
295 $ hg ci -m 'change foo' # rev 6
295 $ hg ci -m 'change foo' # rev 6
296 created new head
296 created new head
297 $ hg tags
297 $ hg tags
298 tip 6:735c3ca72986
298 tip 6:735c3ca72986
299 bar 0:bbd179dfa0a7
299 bar 0:bbd179dfa0a7
300
300
301 Don't allow moving tag without -f:
301 Don't allow moving tag without -f:
302
302
303 $ hg tag -r 3 bar
303 $ hg tag -r 3 bar
304 abort: tag 'bar' already exists (use -f to force)
304 abort: tag 'bar' already exists (use -f to force)
305 [255]
305 [255]
306 $ hg tags
306 $ hg tags
307 tip 6:735c3ca72986
307 tip 6:735c3ca72986
308 bar 0:bbd179dfa0a7
308 bar 0:bbd179dfa0a7
309
309
310 Strip 1: expose an old head:
310 Strip 1: expose an old head:
311
311
312 $ hg --config extensions.mq= strip 5
312 $ hg --config extensions.mq= strip 5
313 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
313 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
314 $ hg tags # partly stale cache
314 $ hg tags # partly stale cache
315 tip 5:735c3ca72986
315 tip 5:735c3ca72986
316 bar 1:78391a272241
316 bar 1:78391a272241
317 $ hg tags # up-to-date cache
317 $ hg tags # up-to-date cache
318 tip 5:735c3ca72986
318 tip 5:735c3ca72986
319 bar 1:78391a272241
319 bar 1:78391a272241
320
320
321 Strip 2: destroy whole branch, no old head exposed
321 Strip 2: destroy whole branch, no old head exposed
322
322
323 $ hg --config extensions.mq= strip 4
323 $ hg --config extensions.mq= strip 4
324 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
324 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
325 $ hg tags # partly stale
325 $ hg tags # partly stale
326 tip 4:735c3ca72986
326 tip 4:735c3ca72986
327 bar 0:bbd179dfa0a7
327 bar 0:bbd179dfa0a7
328 $ rm -f .hg/tags.cache
328 $ rm -f .hg/cache/tags
329 $ hg tags # cold cache
329 $ hg tags # cold cache
330 tip 4:735c3ca72986
330 tip 4:735c3ca72986
331 bar 0:bbd179dfa0a7
331 bar 0:bbd179dfa0a7
332
332
333 Test tag rank with 3 heads:
333 Test tag rank with 3 heads:
334
334
335 $ cd ..
335 $ cd ..
336 $ hg init t4
336 $ hg init t4
337 $ cd t4
337 $ cd t4
338 $ echo foo > foo
338 $ echo foo > foo
339 $ hg add
339 $ hg add
340 adding foo
340 adding foo
341 $ hg ci -m 'add foo' # rev 0
341 $ hg ci -m 'add foo' # rev 0
342 $ hg tag bar # rev 1 bar -> 0
342 $ hg tag bar # rev 1 bar -> 0
343 $ hg tag -f bar # rev 2 bar -> 1
343 $ hg tag -f bar # rev 2 bar -> 1
344 $ hg up -qC 0
344 $ hg up -qC 0
345 $ hg tag -fr 2 bar # rev 3 bar -> 2
345 $ hg tag -fr 2 bar # rev 3 bar -> 2
346 $ hg tags
346 $ hg tags
347 tip 3:197c21bbbf2c
347 tip 3:197c21bbbf2c
348 bar 2:6fa450212aeb
348 bar 2:6fa450212aeb
349 $ hg up -qC 0
349 $ hg up -qC 0
350 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
350 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
351
351
352 Bar should still point to rev 2:
352 Bar should still point to rev 2:
353
353
354 $ hg tags
354 $ hg tags
355 tip 4:3b4b14ed0202
355 tip 4:3b4b14ed0202
356 bar 2:6fa450212aeb
356 bar 2:6fa450212aeb
357
357
358 Test that removing global/local tags does not get confused when trying
358 Test that removing global/local tags does not get confused when trying
359 to remove a tag of type X which actually only exists as a type Y:
359 to remove a tag of type X which actually only exists as a type Y:
360
360
361 $ cd ..
361 $ cd ..
362 $ hg init t5
362 $ hg init t5
363 $ cd t5
363 $ cd t5
364 $ echo foo > foo
364 $ echo foo > foo
365 $ hg add
365 $ hg add
366 adding foo
366 adding foo
367 $ hg ci -m 'add foo' # rev 0
367 $ hg ci -m 'add foo' # rev 0
368
368
369 $ hg tag -r 0 -l localtag
369 $ hg tag -r 0 -l localtag
370 $ hg tag --remove localtag
370 $ hg tag --remove localtag
371 abort: tag 'localtag' is not a global tag
371 abort: tag 'localtag' is not a global tag
372 [255]
372 [255]
373 $
373 $
374 $ hg tag -r 0 globaltag
374 $ hg tag -r 0 globaltag
375 $ hg tag --remove -l globaltag
375 $ hg tag --remove -l globaltag
376 abort: tag 'globaltag' is not a local tag
376 abort: tag 'globaltag' is not a local tag
377 [255]
377 [255]
378 $ hg tags -v
378 $ hg tags -v
379 tip 1:a0b6fe111088
379 tip 1:a0b6fe111088
380 localtag 0:bbd179dfa0a7 local
380 localtag 0:bbd179dfa0a7 local
381 globaltag 0:bbd179dfa0a7
381 globaltag 0:bbd179dfa0a7
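The tags hunk above moves every reference from .hg/tags.cache to .hg/cache/tags, and its "Dump cache" step shows the layout the corruption and cold-cache checks rely on: a head section of "<rev> <node> <.hgtags filenode>" lines, a blank separator, then resolved "<node> <tag name>" entries. A small parser sketch matching only that dumped layout (names and return values are assumptions of the sketch, not Mercurial internals):

    def read_tags_cache(path=".hg/cache/tags"):
        heads, tags = [], {}
        with open(path) as f:
            for line in f:                      # head section, ends at blank line
                line = line.rstrip("\n")
                if not line:
                    break
                rev, node, fnode = line.split()
                heads.append((int(rev), node, fnode))
            for line in f:                      # "<node> <tag name>" entries
                if line.strip():
                    node, name = line.rstrip("\n").split(" ", 1)
                    tags[name] = node
        return heads, tags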