tags: implement persistent tag caching (issue548)...
Greg Ward - r9151:f528d1a9 default
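This changeset replaces the placeholder body of localrepo.destroyed() with a call to tags_.findglobaltags(), so the persistent tag cache is rebuilt as soon as a strip or rollback destroys heads, keeping the cached tip in step with the real tip.

The following is a minimal, runnable sketch of the lazy tag-cache pattern that tags()/_findtags() and the new destroyed() hook rely on. TagCacheDemo and its fake tag data are invented stand-ins for illustration, not part of Mercurial's API.

# Minimal sketch (not from the changeset): the lazy tag-cache pattern.
# TagCacheDemo and the hard-coded tag data are hypothetical stand-ins.

class TagCacheDemo(object):
    def __init__(self):
        self._tags = None      # tag name -> node; None means "not computed yet"
        self._tagtypes = None  # tag name -> 'global' or 'local'

    def _findtags(self):
        # localrepo merges .hgtags across all heads (global tags) with
        # .hg/localtags (local tags); this fakes both sources.
        alltags = {'v1.0': 'deadbeef', 'wip': 'cafebabe'}
        tagtypes = {'v1.0': 'global', 'wip': 'local'}
        return alltags, tagtypes

    def tags(self):
        # compute once, then serve every later call from the in-memory cache
        if self._tags is None:
            self._tags, self._tagtypes = self._findtags()
        return self._tags

    def destroyed(self):
        # after a strip/rollback, recompute immediately so the cached tip
        # always matches the current tip
        self._tags = self._tagtypes = None
        self.tags()

demo = TagCacheDemo()
print(demo.tags())   # first call computes and caches the mapping
demo.destroyed()     # simulate a strip: the cache is dropped and rebuilt
print(demo.tags())

In the real repository the immediate rebuild is what makes "cachetip == currenttip" a reliable no-change test: the on-disk cache never describes heads that no longer exist.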
@@ -1,2135 +1,2144 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92
92
93 # These two define the set of tags for this repository. _tags
93 # These two define the set of tags for this repository. _tags
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # 'local'. (Global tags are defined by .hgtags across all
95 # 'local'. (Global tags are defined by .hgtags across all
96 # heads, and local tags are defined in .hg/localtags.) They
96 # heads, and local tags are defined in .hg/localtags.) They
97 # constitute the in-memory cache of tags.
97 # constitute the in-memory cache of tags.
98 self._tags = None
98 self._tags = None
99 self._tagtypes = None
99 self._tagtypes = None
100
100
101 self.branchcache = None
101 self.branchcache = None
102 self._ubranchcache = None # UTF-8 version of branchcache
102 self._ubranchcache = None # UTF-8 version of branchcache
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.defversion = c.version
116 self.sopener.defversion = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __nonzero__(self):
132 def __nonzero__(self):
133 return True
133 return True
134
134
135 def __len__(self):
135 def __len__(self):
136 return len(self.changelog)
136 return len(self.changelog)
137
137
138 def __iter__(self):
138 def __iter__(self):
139 for i in xrange(len(self)):
139 for i in xrange(len(self)):
140 yield i
140 yield i
141
141
142 def url(self):
142 def url(self):
143 return 'file:' + self.root
143 return 'file:' + self.root
144
144
145 def hook(self, name, throw=False, **args):
145 def hook(self, name, throw=False, **args):
146 return hook.hook(self.ui, self, name, throw, **args)
146 return hook.hook(self.ui, self, name, throw, **args)
147
147
148 tag_disallowed = ':\r\n'
148 tag_disallowed = ':\r\n'
149
149
150 def _tag(self, names, node, message, local, user, date, extra={}):
150 def _tag(self, names, node, message, local, user, date, extra={}):
151 if isinstance(names, str):
151 if isinstance(names, str):
152 allchars = names
152 allchars = names
153 names = (names,)
153 names = (names,)
154 else:
154 else:
155 allchars = ''.join(names)
155 allchars = ''.join(names)
156 for c in self.tag_disallowed:
156 for c in self.tag_disallowed:
157 if c in allchars:
157 if c in allchars:
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
159
159
160 for name in names:
160 for name in names:
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
162 local=local)
162 local=local)
163
163
164 def writetags(fp, names, munge, prevtags):
164 def writetags(fp, names, munge, prevtags):
165 fp.seek(0, 2)
165 fp.seek(0, 2)
166 if prevtags and prevtags[-1] != '\n':
166 if prevtags and prevtags[-1] != '\n':
167 fp.write('\n')
167 fp.write('\n')
168 for name in names:
168 for name in names:
169 m = munge and munge(name) or name
169 m = munge and munge(name) or name
170 if self._tagtypes and name in self._tagtypes:
170 if self._tagtypes and name in self._tagtypes:
171 old = self._tags.get(name, nullid)
171 old = self._tags.get(name, nullid)
172 fp.write('%s %s\n' % (hex(old), m))
172 fp.write('%s %s\n' % (hex(old), m))
173 fp.write('%s %s\n' % (hex(node), m))
173 fp.write('%s %s\n' % (hex(node), m))
174 fp.close()
174 fp.close()
175
175
176 prevtags = ''
176 prevtags = ''
177 if local:
177 if local:
178 try:
178 try:
179 fp = self.opener('localtags', 'r+')
179 fp = self.opener('localtags', 'r+')
180 except IOError:
180 except IOError:
181 fp = self.opener('localtags', 'a')
181 fp = self.opener('localtags', 'a')
182 else:
182 else:
183 prevtags = fp.read()
183 prevtags = fp.read()
184
184
185 # local tags are stored in the current charset
185 # local tags are stored in the current charset
186 writetags(fp, names, None, prevtags)
186 writetags(fp, names, None, prevtags)
187 for name in names:
187 for name in names:
188 self.hook('tag', node=hex(node), tag=name, local=local)
188 self.hook('tag', node=hex(node), tag=name, local=local)
189 return
189 return
190
190
191 try:
191 try:
192 fp = self.wfile('.hgtags', 'rb+')
192 fp = self.wfile('.hgtags', 'rb+')
193 except IOError:
193 except IOError:
194 fp = self.wfile('.hgtags', 'ab')
194 fp = self.wfile('.hgtags', 'ab')
195 else:
195 else:
196 prevtags = fp.read()
196 prevtags = fp.read()
197
197
198 # committed tags are stored in UTF-8
198 # committed tags are stored in UTF-8
199 writetags(fp, names, encoding.fromlocal, prevtags)
199 writetags(fp, names, encoding.fromlocal, prevtags)
200
200
201 if '.hgtags' not in self.dirstate:
201 if '.hgtags' not in self.dirstate:
202 self.add(['.hgtags'])
202 self.add(['.hgtags'])
203
203
204 m = match_.exact(self.root, '', ['.hgtags'])
204 m = match_.exact(self.root, '', ['.hgtags'])
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
206
206
207 for name in names:
207 for name in names:
208 self.hook('tag', node=hex(node), tag=name, local=local)
208 self.hook('tag', node=hex(node), tag=name, local=local)
209
209
210 return tagnode
210 return tagnode
211
211
212 def tag(self, names, node, message, local, user, date):
212 def tag(self, names, node, message, local, user, date):
213 '''tag a revision with one or more symbolic names.
213 '''tag a revision with one or more symbolic names.
214
214
215 names is a list of strings or, when adding a single tag, names may be a
215 names is a list of strings or, when adding a single tag, names may be a
216 string.
216 string.
217
217
218 if local is True, the tags are stored in a per-repository file.
218 if local is True, the tags are stored in a per-repository file.
219 otherwise, they are stored in the .hgtags file, and a new
219 otherwise, they are stored in the .hgtags file, and a new
220 changeset is committed with the change.
220 changeset is committed with the change.
221
221
222 keyword arguments:
222 keyword arguments:
223
223
224 local: whether to store tags in non-version-controlled file
224 local: whether to store tags in non-version-controlled file
225 (default False)
225 (default False)
226
226
227 message: commit message to use if committing
227 message: commit message to use if committing
228
228
229 user: name of user to use if committing
229 user: name of user to use if committing
230
230
231 date: date tuple to use if committing'''
231 date: date tuple to use if committing'''
232
232
233 for x in self.status()[:5]:
233 for x in self.status()[:5]:
234 if '.hgtags' in x:
234 if '.hgtags' in x:
235 raise util.Abort(_('working copy of .hgtags is changed '
235 raise util.Abort(_('working copy of .hgtags is changed '
236 '(please commit .hgtags manually)'))
236 '(please commit .hgtags manually)'))
237
237
238 self.tags() # instantiate the cache
238 self.tags() # instantiate the cache
239 self._tag(names, node, message, local, user, date)
239 self._tag(names, node, message, local, user, date)
240
240
241 def tags(self):
241 def tags(self):
242 '''return a mapping of tag to node'''
242 '''return a mapping of tag to node'''
243 if self._tags is None:
243 if self._tags is None:
244 (self._tags, self._tagtypes) = self._findtags()
244 (self._tags, self._tagtypes) = self._findtags()
245
245
246 return self._tags
246 return self._tags
247
247
248 def _findtags(self):
248 def _findtags(self):
249 '''Do the hard work of finding tags. Return a pair of dicts
249 '''Do the hard work of finding tags. Return a pair of dicts
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
251 maps tag name to a string like \'global\' or \'local\'.
251 maps tag name to a string like \'global\' or \'local\'.
252 Subclasses or extensions are free to add their own tags, but
252 Subclasses or extensions are free to add their own tags, but
253 should be aware that the returned dicts will be retained for the
253 should be aware that the returned dicts will be retained for the
254 duration of the localrepo object.'''
254 duration of the localrepo object.'''
255
255
256 # XXX what tagtype should subclasses/extensions use? Currently
256 # XXX what tagtype should subclasses/extensions use? Currently
257 # mq and bookmarks add tags, but do not set the tagtype at all.
257 # mq and bookmarks add tags, but do not set the tagtype at all.
258 # Should each extension invent its own tag type? Should there
258 # Should each extension invent its own tag type? Should there
259 # be one tagtype for all such "virtual" tags? Or is the status
259 # be one tagtype for all such "virtual" tags? Or is the status
260 # quo fine?
260 # quo fine?
261
261
262 alltags = {} # map tag name to (node, hist)
262 alltags = {} # map tag name to (node, hist)
263 tagtypes = {}
263 tagtypes = {}
264
264
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
267
267
268 tags = {}
268 tags = {}
269 for (name, (node, hist)) in alltags.iteritems():
269 for (name, (node, hist)) in alltags.iteritems():
270 if node != nullid:
270 if node != nullid:
271 tags[name] = node
271 tags[name] = node
272 tags['tip'] = self.changelog.tip()
272 tags['tip'] = self.changelog.tip()
273 return (tags, tagtypes)
273 return (tags, tagtypes)
274
274
275 def tagtype(self, tagname):
275 def tagtype(self, tagname):
276 '''
276 '''
277 return the type of the given tag. result can be:
277 return the type of the given tag. result can be:
278
278
279 'local' : a local tag
279 'local' : a local tag
280 'global' : a global tag
280 'global' : a global tag
281 None : tag does not exist
281 None : tag does not exist
282 '''
282 '''
283
283
284 self.tags()
284 self.tags()
285
285
286 return self._tagtypes.get(tagname)
286 return self._tagtypes.get(tagname)
287
287
288 def tagslist(self):
288 def tagslist(self):
289 '''return a list of tags ordered by revision'''
289 '''return a list of tags ordered by revision'''
290 l = []
290 l = []
291 for t, n in self.tags().iteritems():
291 for t, n in self.tags().iteritems():
292 try:
292 try:
293 r = self.changelog.rev(n)
293 r = self.changelog.rev(n)
294 except:
294 except:
295 r = -2 # sort to the beginning of the list if unknown
295 r = -2 # sort to the beginning of the list if unknown
296 l.append((r, t, n))
296 l.append((r, t, n))
297 return [(t, n) for r, t, n in sorted(l)]
297 return [(t, n) for r, t, n in sorted(l)]
298
298
299 def nodetags(self, node):
299 def nodetags(self, node):
300 '''return the tags associated with a node'''
300 '''return the tags associated with a node'''
301 if not self.nodetagscache:
301 if not self.nodetagscache:
302 self.nodetagscache = {}
302 self.nodetagscache = {}
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 self.nodetagscache.setdefault(n, []).append(t)
304 self.nodetagscache.setdefault(n, []).append(t)
305 return self.nodetagscache.get(node, [])
305 return self.nodetagscache.get(node, [])
306
306
307 def _branchtags(self, partial, lrev):
307 def _branchtags(self, partial, lrev):
308 # TODO: rename this function?
308 # TODO: rename this function?
309 tiprev = len(self) - 1
309 tiprev = len(self) - 1
310 if lrev != tiprev:
310 if lrev != tiprev:
311 self._updatebranchcache(partial, lrev+1, tiprev+1)
311 self._updatebranchcache(partial, lrev+1, tiprev+1)
312 self._writebranchcache(partial, self.changelog.tip(), tiprev)
312 self._writebranchcache(partial, self.changelog.tip(), tiprev)
313
313
314 return partial
314 return partial
315
315
316 def branchmap(self):
316 def branchmap(self):
317 tip = self.changelog.tip()
317 tip = self.changelog.tip()
318 if self.branchcache is not None and self._branchcachetip == tip:
318 if self.branchcache is not None and self._branchcachetip == tip:
319 return self.branchcache
319 return self.branchcache
320
320
321 oldtip = self._branchcachetip
321 oldtip = self._branchcachetip
322 self._branchcachetip = tip
322 self._branchcachetip = tip
323 if self.branchcache is None:
323 if self.branchcache is None:
324 self.branchcache = {} # avoid recursion in changectx
324 self.branchcache = {} # avoid recursion in changectx
325 else:
325 else:
326 self.branchcache.clear() # keep using the same dict
326 self.branchcache.clear() # keep using the same dict
327 if oldtip is None or oldtip not in self.changelog.nodemap:
327 if oldtip is None or oldtip not in self.changelog.nodemap:
328 partial, last, lrev = self._readbranchcache()
328 partial, last, lrev = self._readbranchcache()
329 else:
329 else:
330 lrev = self.changelog.rev(oldtip)
330 lrev = self.changelog.rev(oldtip)
331 partial = self._ubranchcache
331 partial = self._ubranchcache
332
332
333 self._branchtags(partial, lrev)
333 self._branchtags(partial, lrev)
334 # this private cache holds all heads (not just tips)
334 # this private cache holds all heads (not just tips)
335 self._ubranchcache = partial
335 self._ubranchcache = partial
336
336
337 # the branch cache is stored on disk as UTF-8, but in the local
337 # the branch cache is stored on disk as UTF-8, but in the local
338 # charset internally
338 # charset internally
339 for k, v in partial.iteritems():
339 for k, v in partial.iteritems():
340 self.branchcache[encoding.tolocal(k)] = v
340 self.branchcache[encoding.tolocal(k)] = v
341 return self.branchcache
341 return self.branchcache
342
342
343
343
344 def branchtags(self):
344 def branchtags(self):
345 '''return a dict where branch names map to the tipmost head of
345 '''return a dict where branch names map to the tipmost head of
346 the branch, open heads come before closed'''
346 the branch, open heads come before closed'''
347 bt = {}
347 bt = {}
348 for bn, heads in self.branchmap().iteritems():
348 for bn, heads in self.branchmap().iteritems():
349 head = None
349 head = None
350 for i in range(len(heads)-1, -1, -1):
350 for i in range(len(heads)-1, -1, -1):
351 h = heads[i]
351 h = heads[i]
352 if 'close' not in self.changelog.read(h)[5]:
352 if 'close' not in self.changelog.read(h)[5]:
353 head = h
353 head = h
354 break
354 break
355 # no open heads were found
355 # no open heads were found
356 if head is None:
356 if head is None:
357 head = heads[-1]
357 head = heads[-1]
358 bt[bn] = head
358 bt[bn] = head
359 return bt
359 return bt
360
360
361
361
362 def _readbranchcache(self):
362 def _readbranchcache(self):
363 partial = {}
363 partial = {}
364 try:
364 try:
365 f = self.opener("branchheads.cache")
365 f = self.opener("branchheads.cache")
366 lines = f.read().split('\n')
366 lines = f.read().split('\n')
367 f.close()
367 f.close()
368 except (IOError, OSError):
368 except (IOError, OSError):
369 return {}, nullid, nullrev
369 return {}, nullid, nullrev
370
370
371 try:
371 try:
372 last, lrev = lines.pop(0).split(" ", 1)
372 last, lrev = lines.pop(0).split(" ", 1)
373 last, lrev = bin(last), int(lrev)
373 last, lrev = bin(last), int(lrev)
374 if lrev >= len(self) or self[lrev].node() != last:
374 if lrev >= len(self) or self[lrev].node() != last:
375 # invalidate the cache
375 # invalidate the cache
376 raise ValueError('invalidating branch cache (tip differs)')
376 raise ValueError('invalidating branch cache (tip differs)')
377 for l in lines:
377 for l in lines:
378 if not l: continue
378 if not l: continue
379 node, label = l.split(" ", 1)
379 node, label = l.split(" ", 1)
380 partial.setdefault(label.strip(), []).append(bin(node))
380 partial.setdefault(label.strip(), []).append(bin(node))
381 except KeyboardInterrupt:
381 except KeyboardInterrupt:
382 raise
382 raise
383 except Exception, inst:
383 except Exception, inst:
384 if self.ui.debugflag:
384 if self.ui.debugflag:
385 self.ui.warn(str(inst), '\n')
385 self.ui.warn(str(inst), '\n')
386 partial, last, lrev = {}, nullid, nullrev
386 partial, last, lrev = {}, nullid, nullrev
387 return partial, last, lrev
387 return partial, last, lrev
388
388
389 def _writebranchcache(self, branches, tip, tiprev):
389 def _writebranchcache(self, branches, tip, tiprev):
390 try:
390 try:
391 f = self.opener("branchheads.cache", "w", atomictemp=True)
391 f = self.opener("branchheads.cache", "w", atomictemp=True)
392 f.write("%s %s\n" % (hex(tip), tiprev))
392 f.write("%s %s\n" % (hex(tip), tiprev))
393 for label, nodes in branches.iteritems():
393 for label, nodes in branches.iteritems():
394 for node in nodes:
394 for node in nodes:
395 f.write("%s %s\n" % (hex(node), label))
395 f.write("%s %s\n" % (hex(node), label))
396 f.rename()
396 f.rename()
397 except (IOError, OSError):
397 except (IOError, OSError):
398 pass
398 pass
399
399
400 def _updatebranchcache(self, partial, start, end):
400 def _updatebranchcache(self, partial, start, end):
401 # collect new branch entries
401 # collect new branch entries
402 newbranches = {}
402 newbranches = {}
403 for r in xrange(start, end):
403 for r in xrange(start, end):
404 c = self[r]
404 c = self[r]
405 newbranches.setdefault(c.branch(), []).append(c.node())
405 newbranches.setdefault(c.branch(), []).append(c.node())
406 # if older branchheads are reachable from new ones, they aren't
406 # if older branchheads are reachable from new ones, they aren't
407 # really branchheads. Note checking parents is insufficient:
407 # really branchheads. Note checking parents is insufficient:
408 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
408 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
409 for branch, newnodes in newbranches.iteritems():
409 for branch, newnodes in newbranches.iteritems():
410 bheads = partial.setdefault(branch, [])
410 bheads = partial.setdefault(branch, [])
411 bheads.extend(newnodes)
411 bheads.extend(newnodes)
412 if len(bheads) < 2:
412 if len(bheads) < 2:
413 continue
413 continue
414 newbheads = []
414 newbheads = []
415 # starting from tip means fewer passes over reachable
415 # starting from tip means fewer passes over reachable
416 while newnodes:
416 while newnodes:
417 latest = newnodes.pop()
417 latest = newnodes.pop()
418 if latest not in bheads:
418 if latest not in bheads:
419 continue
419 continue
420 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
420 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
421 reachable = self.changelog.reachable(latest, minbhrev)
421 reachable = self.changelog.reachable(latest, minbhrev)
422 bheads = [b for b in bheads if b not in reachable]
422 bheads = [b for b in bheads if b not in reachable]
423 newbheads.insert(0, latest)
423 newbheads.insert(0, latest)
424 bheads.extend(newbheads)
424 bheads.extend(newbheads)
425 partial[branch] = bheads
425 partial[branch] = bheads
426
426
427 def lookup(self, key):
427 def lookup(self, key):
428 if isinstance(key, int):
428 if isinstance(key, int):
429 return self.changelog.node(key)
429 return self.changelog.node(key)
430 elif key == '.':
430 elif key == '.':
431 return self.dirstate.parents()[0]
431 return self.dirstate.parents()[0]
432 elif key == 'null':
432 elif key == 'null':
433 return nullid
433 return nullid
434 elif key == 'tip':
434 elif key == 'tip':
435 return self.changelog.tip()
435 return self.changelog.tip()
436 n = self.changelog._match(key)
436 n = self.changelog._match(key)
437 if n:
437 if n:
438 return n
438 return n
439 if key in self.tags():
439 if key in self.tags():
440 return self.tags()[key]
440 return self.tags()[key]
441 if key in self.branchtags():
441 if key in self.branchtags():
442 return self.branchtags()[key]
442 return self.branchtags()[key]
443 n = self.changelog._partialmatch(key)
443 n = self.changelog._partialmatch(key)
444 if n:
444 if n:
445 return n
445 return n
446
446
447 # can't find key, check if it might have come from damaged dirstate
447 # can't find key, check if it might have come from damaged dirstate
448 if key in self.dirstate.parents():
448 if key in self.dirstate.parents():
449 raise error.Abort(_("working directory has unknown parent '%s'!")
449 raise error.Abort(_("working directory has unknown parent '%s'!")
450 % short(key))
450 % short(key))
451 try:
451 try:
452 if len(key) == 20:
452 if len(key) == 20:
453 key = hex(key)
453 key = hex(key)
454 except:
454 except:
455 pass
455 pass
456 raise error.RepoError(_("unknown revision '%s'") % key)
456 raise error.RepoError(_("unknown revision '%s'") % key)
457
457
458 def local(self):
458 def local(self):
459 return True
459 return True
460
460
461 def join(self, f):
461 def join(self, f):
462 return os.path.join(self.path, f)
462 return os.path.join(self.path, f)
463
463
464 def wjoin(self, f):
464 def wjoin(self, f):
465 return os.path.join(self.root, f)
465 return os.path.join(self.root, f)
466
466
467 def rjoin(self, f):
467 def rjoin(self, f):
468 return os.path.join(self.root, util.pconvert(f))
468 return os.path.join(self.root, util.pconvert(f))
469
469
470 def file(self, f):
470 def file(self, f):
471 if f[0] == '/':
471 if f[0] == '/':
472 f = f[1:]
472 f = f[1:]
473 return filelog.filelog(self.sopener, f)
473 return filelog.filelog(self.sopener, f)
474
474
475 def changectx(self, changeid):
475 def changectx(self, changeid):
476 return self[changeid]
476 return self[changeid]
477
477
478 def parents(self, changeid=None):
478 def parents(self, changeid=None):
479 '''get list of changectxs for parents of changeid'''
479 '''get list of changectxs for parents of changeid'''
480 return self[changeid].parents()
480 return self[changeid].parents()
481
481
482 def filectx(self, path, changeid=None, fileid=None):
482 def filectx(self, path, changeid=None, fileid=None):
483 """changeid can be a changeset revision, node, or tag.
483 """changeid can be a changeset revision, node, or tag.
484 fileid can be a file revision or node."""
484 fileid can be a file revision or node."""
485 return context.filectx(self, path, changeid, fileid)
485 return context.filectx(self, path, changeid, fileid)
486
486
487 def getcwd(self):
487 def getcwd(self):
488 return self.dirstate.getcwd()
488 return self.dirstate.getcwd()
489
489
490 def pathto(self, f, cwd=None):
490 def pathto(self, f, cwd=None):
491 return self.dirstate.pathto(f, cwd)
491 return self.dirstate.pathto(f, cwd)
492
492
493 def wfile(self, f, mode='r'):
493 def wfile(self, f, mode='r'):
494 return self.wopener(f, mode)
494 return self.wopener(f, mode)
495
495
496 def _link(self, f):
496 def _link(self, f):
497 return os.path.islink(self.wjoin(f))
497 return os.path.islink(self.wjoin(f))
498
498
499 def _filter(self, filter, filename, data):
499 def _filter(self, filter, filename, data):
500 if filter not in self.filterpats:
500 if filter not in self.filterpats:
501 l = []
501 l = []
502 for pat, cmd in self.ui.configitems(filter):
502 for pat, cmd in self.ui.configitems(filter):
503 if cmd == '!':
503 if cmd == '!':
504 continue
504 continue
505 mf = match_.match(self.root, '', [pat])
505 mf = match_.match(self.root, '', [pat])
506 fn = None
506 fn = None
507 params = cmd
507 params = cmd
508 for name, filterfn in self._datafilters.iteritems():
508 for name, filterfn in self._datafilters.iteritems():
509 if cmd.startswith(name):
509 if cmd.startswith(name):
510 fn = filterfn
510 fn = filterfn
511 params = cmd[len(name):].lstrip()
511 params = cmd[len(name):].lstrip()
512 break
512 break
513 if not fn:
513 if not fn:
514 fn = lambda s, c, **kwargs: util.filter(s, c)
514 fn = lambda s, c, **kwargs: util.filter(s, c)
515 # Wrap old filters not supporting keyword arguments
515 # Wrap old filters not supporting keyword arguments
516 if not inspect.getargspec(fn)[2]:
516 if not inspect.getargspec(fn)[2]:
517 oldfn = fn
517 oldfn = fn
518 fn = lambda s, c, **kwargs: oldfn(s, c)
518 fn = lambda s, c, **kwargs: oldfn(s, c)
519 l.append((mf, fn, params))
519 l.append((mf, fn, params))
520 self.filterpats[filter] = l
520 self.filterpats[filter] = l
521
521
522 for mf, fn, cmd in self.filterpats[filter]:
522 for mf, fn, cmd in self.filterpats[filter]:
523 if mf(filename):
523 if mf(filename):
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
525 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
525 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
526 break
526 break
527
527
528 return data
528 return data
529
529
530 def adddatafilter(self, name, filter):
530 def adddatafilter(self, name, filter):
531 self._datafilters[name] = filter
531 self._datafilters[name] = filter
532
532
533 def wread(self, filename):
533 def wread(self, filename):
534 if self._link(filename):
534 if self._link(filename):
535 data = os.readlink(self.wjoin(filename))
535 data = os.readlink(self.wjoin(filename))
536 else:
536 else:
537 data = self.wopener(filename, 'r').read()
537 data = self.wopener(filename, 'r').read()
538 return self._filter("encode", filename, data)
538 return self._filter("encode", filename, data)
539
539
540 def wwrite(self, filename, data, flags):
540 def wwrite(self, filename, data, flags):
541 data = self._filter("decode", filename, data)
541 data = self._filter("decode", filename, data)
542 try:
542 try:
543 os.unlink(self.wjoin(filename))
543 os.unlink(self.wjoin(filename))
544 except OSError:
544 except OSError:
545 pass
545 pass
546 if 'l' in flags:
546 if 'l' in flags:
547 self.wopener.symlink(data, filename)
547 self.wopener.symlink(data, filename)
548 else:
548 else:
549 self.wopener(filename, 'w').write(data)
549 self.wopener(filename, 'w').write(data)
550 if 'x' in flags:
550 if 'x' in flags:
551 util.set_flags(self.wjoin(filename), False, True)
551 util.set_flags(self.wjoin(filename), False, True)
552
552
553 def wwritedata(self, filename, data):
553 def wwritedata(self, filename, data):
554 return self._filter("decode", filename, data)
554 return self._filter("decode", filename, data)
555
555
556 def transaction(self):
556 def transaction(self):
557 tr = self._transref and self._transref() or None
557 tr = self._transref and self._transref() or None
558 if tr and tr.running():
558 if tr and tr.running():
559 return tr.nest()
559 return tr.nest()
560
560
561 # abort here if the journal already exists
561 # abort here if the journal already exists
562 if os.path.exists(self.sjoin("journal")):
562 if os.path.exists(self.sjoin("journal")):
563 raise error.RepoError(_("journal already exists - run hg recover"))
563 raise error.RepoError(_("journal already exists - run hg recover"))
564
564
565 # save dirstate for rollback
565 # save dirstate for rollback
566 try:
566 try:
567 ds = self.opener("dirstate").read()
567 ds = self.opener("dirstate").read()
568 except IOError:
568 except IOError:
569 ds = ""
569 ds = ""
570 self.opener("journal.dirstate", "w").write(ds)
570 self.opener("journal.dirstate", "w").write(ds)
571 self.opener("journal.branch", "w").write(self.dirstate.branch())
571 self.opener("journal.branch", "w").write(self.dirstate.branch())
572
572
573 renames = [(self.sjoin("journal"), self.sjoin("undo")),
573 renames = [(self.sjoin("journal"), self.sjoin("undo")),
574 (self.join("journal.dirstate"), self.join("undo.dirstate")),
574 (self.join("journal.dirstate"), self.join("undo.dirstate")),
575 (self.join("journal.branch"), self.join("undo.branch"))]
575 (self.join("journal.branch"), self.join("undo.branch"))]
576 tr = transaction.transaction(self.ui.warn, self.sopener,
576 tr = transaction.transaction(self.ui.warn, self.sopener,
577 self.sjoin("journal"),
577 self.sjoin("journal"),
578 aftertrans(renames),
578 aftertrans(renames),
579 self.store.createmode)
579 self.store.createmode)
580 self._transref = weakref.ref(tr)
580 self._transref = weakref.ref(tr)
581 return tr
581 return tr
582
582
583 def recover(self):
583 def recover(self):
584 lock = self.lock()
584 lock = self.lock()
585 try:
585 try:
586 if os.path.exists(self.sjoin("journal")):
586 if os.path.exists(self.sjoin("journal")):
587 self.ui.status(_("rolling back interrupted transaction\n"))
587 self.ui.status(_("rolling back interrupted transaction\n"))
588 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
588 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
589 self.invalidate()
589 self.invalidate()
590 return True
590 return True
591 else:
591 else:
592 self.ui.warn(_("no interrupted transaction available\n"))
592 self.ui.warn(_("no interrupted transaction available\n"))
593 return False
593 return False
594 finally:
594 finally:
595 lock.release()
595 lock.release()
596
596
597 def rollback(self):
597 def rollback(self):
598 wlock = lock = None
598 wlock = lock = None
599 try:
599 try:
600 wlock = self.wlock()
600 wlock = self.wlock()
601 lock = self.lock()
601 lock = self.lock()
602 if os.path.exists(self.sjoin("undo")):
602 if os.path.exists(self.sjoin("undo")):
603 self.ui.status(_("rolling back last transaction\n"))
603 self.ui.status(_("rolling back last transaction\n"))
604 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
604 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
605 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
605 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
606 try:
606 try:
607 branch = self.opener("undo.branch").read()
607 branch = self.opener("undo.branch").read()
608 self.dirstate.setbranch(branch)
608 self.dirstate.setbranch(branch)
609 except IOError:
609 except IOError:
610 self.ui.warn(_("Named branch could not be reset, "
610 self.ui.warn(_("Named branch could not be reset, "
611 "current branch still is: %s\n")
611 "current branch still is: %s\n")
612 % encoding.tolocal(self.dirstate.branch()))
612 % encoding.tolocal(self.dirstate.branch()))
613 self.invalidate()
613 self.invalidate()
614 self.dirstate.invalidate()
614 self.dirstate.invalidate()
615 self.destroyed()
615 self.destroyed()
616 else:
616 else:
617 self.ui.warn(_("no rollback information available\n"))
617 self.ui.warn(_("no rollback information available\n"))
618 finally:
618 finally:
619 release(lock, wlock)
619 release(lock, wlock)
620
620
621 def invalidate(self):
621 def invalidate(self):
622 for a in "changelog manifest".split():
622 for a in "changelog manifest".split():
623 if a in self.__dict__:
623 if a in self.__dict__:
624 delattr(self, a)
624 delattr(self, a)
625 self._tags = None
625 self._tags = None
626 self._tagtypes = None
626 self._tagtypes = None
627 self.nodetagscache = None
627 self.nodetagscache = None
628 self.branchcache = None
628 self.branchcache = None
629 self._ubranchcache = None
629 self._ubranchcache = None
630 self._branchcachetip = None
630 self._branchcachetip = None
631
631
632 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
632 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
633 try:
633 try:
634 l = lock.lock(lockname, 0, releasefn, desc=desc)
634 l = lock.lock(lockname, 0, releasefn, desc=desc)
635 except error.LockHeld, inst:
635 except error.LockHeld, inst:
636 if not wait:
636 if not wait:
637 raise
637 raise
638 self.ui.warn(_("waiting for lock on %s held by %r\n") %
638 self.ui.warn(_("waiting for lock on %s held by %r\n") %
639 (desc, inst.locker))
639 (desc, inst.locker))
640 # default to 600 seconds timeout
640 # default to 600 seconds timeout
641 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
641 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
642 releasefn, desc=desc)
642 releasefn, desc=desc)
643 if acquirefn:
643 if acquirefn:
644 acquirefn()
644 acquirefn()
645 return l
645 return l
646
646
647 def lock(self, wait=True):
647 def lock(self, wait=True):
648 l = self._lockref and self._lockref()
648 l = self._lockref and self._lockref()
649 if l is not None and l.held:
649 if l is not None and l.held:
650 l.lock()
650 l.lock()
651 return l
651 return l
652
652
653 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
653 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
654 _('repository %s') % self.origroot)
654 _('repository %s') % self.origroot)
655 self._lockref = weakref.ref(l)
655 self._lockref = weakref.ref(l)
656 return l
656 return l
657
657
658 def wlock(self, wait=True):
658 def wlock(self, wait=True):
659 l = self._wlockref and self._wlockref()
659 l = self._wlockref and self._wlockref()
660 if l is not None and l.held:
660 if l is not None and l.held:
661 l.lock()
661 l.lock()
662 return l
662 return l
663
663
664 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
664 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
665 self.dirstate.invalidate, _('working directory of %s') %
665 self.dirstate.invalidate, _('working directory of %s') %
666 self.origroot)
666 self.origroot)
667 self._wlockref = weakref.ref(l)
667 self._wlockref = weakref.ref(l)
668 return l
668 return l
669
669
670 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
670 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
671 """
671 """
672 commit an individual file as part of a larger transaction
672 commit an individual file as part of a larger transaction
673 """
673 """
674
674
675 fname = fctx.path()
675 fname = fctx.path()
676 text = fctx.data()
676 text = fctx.data()
677 flog = self.file(fname)
677 flog = self.file(fname)
678 fparent1 = manifest1.get(fname, nullid)
678 fparent1 = manifest1.get(fname, nullid)
679 fparent2 = fparent2o = manifest2.get(fname, nullid)
679 fparent2 = fparent2o = manifest2.get(fname, nullid)
680
680
681 meta = {}
681 meta = {}
682 copy = fctx.renamed()
682 copy = fctx.renamed()
683 if copy and copy[0] != fname:
683 if copy and copy[0] != fname:
684 # Mark the new revision of this file as a copy of another
684 # Mark the new revision of this file as a copy of another
685 # file. This copy data will effectively act as a parent
685 # file. This copy data will effectively act as a parent
686 # of this new revision. If this is a merge, the first
686 # of this new revision. If this is a merge, the first
687 # parent will be the nullid (meaning "look up the copy data")
687 # parent will be the nullid (meaning "look up the copy data")
688 # and the second one will be the other parent. For example:
688 # and the second one will be the other parent. For example:
689 #
689 #
690 # 0 --- 1 --- 3 rev1 changes file foo
690 # 0 --- 1 --- 3 rev1 changes file foo
691 # \ / rev2 renames foo to bar and changes it
691 # \ / rev2 renames foo to bar and changes it
692 # \- 2 -/ rev3 should have bar with all changes and
692 # \- 2 -/ rev3 should have bar with all changes and
693 # should record that bar descends from
693 # should record that bar descends from
694 # bar in rev2 and foo in rev1
694 # bar in rev2 and foo in rev1
695 #
695 #
696 # this allows this merge to succeed:
696 # this allows this merge to succeed:
697 #
697 #
698 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
698 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
699 # \ / merging rev3 and rev4 should use bar@rev2
699 # \ / merging rev3 and rev4 should use bar@rev2
700 # \- 2 --- 4 as the merge base
700 # \- 2 --- 4 as the merge base
701 #
701 #
702
702
703 cfname = copy[0]
703 cfname = copy[0]
704 crev = manifest1.get(cfname)
704 crev = manifest1.get(cfname)
705 newfparent = fparent2
705 newfparent = fparent2
706
706
707 if manifest2: # branch merge
707 if manifest2: # branch merge
708 if fparent2 == nullid or crev is None: # copied on remote side
708 if fparent2 == nullid or crev is None: # copied on remote side
709 if cfname in manifest2:
709 if cfname in manifest2:
710 crev = manifest2[cfname]
710 crev = manifest2[cfname]
711 newfparent = fparent1
711 newfparent = fparent1
712
712
713 # find source in nearest ancestor if we've lost track
713 # find source in nearest ancestor if we've lost track
714 if not crev:
714 if not crev:
715 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
715 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
716 (fname, cfname))
716 (fname, cfname))
717 for ancestor in self['.'].ancestors():
717 for ancestor in self['.'].ancestors():
718 if cfname in ancestor:
718 if cfname in ancestor:
719 crev = ancestor[cfname].filenode()
719 crev = ancestor[cfname].filenode()
720 break
720 break
721
721
722 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
722 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
723 meta["copy"] = cfname
723 meta["copy"] = cfname
724 meta["copyrev"] = hex(crev)
724 meta["copyrev"] = hex(crev)
725 fparent1, fparent2 = nullid, newfparent
725 fparent1, fparent2 = nullid, newfparent
726 elif fparent2 != nullid:
726 elif fparent2 != nullid:
727 # is one parent an ancestor of the other?
727 # is one parent an ancestor of the other?
728 fparentancestor = flog.ancestor(fparent1, fparent2)
728 fparentancestor = flog.ancestor(fparent1, fparent2)
729 if fparentancestor == fparent1:
729 if fparentancestor == fparent1:
730 fparent1, fparent2 = fparent2, nullid
730 fparent1, fparent2 = fparent2, nullid
731 elif fparentancestor == fparent2:
731 elif fparentancestor == fparent2:
732 fparent2 = nullid
732 fparent2 = nullid
733
733
734 # is the file changed?
734 # is the file changed?
735 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
735 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
736 changelist.append(fname)
736 changelist.append(fname)
737 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
737 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
738
738
739 # are just the flags changed during merge?
739 # are just the flags changed during merge?
740 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
740 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
741 changelist.append(fname)
741 changelist.append(fname)
742
742
743 return fparent1
743 return fparent1
744
744
745 def commit(self, text="", user=None, date=None, match=None, force=False,
745 def commit(self, text="", user=None, date=None, match=None, force=False,
746 editor=False, extra={}):
746 editor=False, extra={}):
747 """Add a new revision to current repository.
747 """Add a new revision to current repository.
748
748
749 Revision information is gathered from the working directory,
749 Revision information is gathered from the working directory,
750 match can be used to filter the committed files. If editor is
750 match can be used to filter the committed files. If editor is
751 supplied, it is called to get a commit message.
751 supplied, it is called to get a commit message.
752 """
752 """
753
753
754 def fail(f, msg):
754 def fail(f, msg):
755 raise util.Abort('%s: %s' % (f, msg))
755 raise util.Abort('%s: %s' % (f, msg))
756
756
757 if not match:
757 if not match:
758 match = match_.always(self.root, '')
758 match = match_.always(self.root, '')
759
759
760 if not force:
760 if not force:
761 vdirs = []
761 vdirs = []
762 match.dir = vdirs.append
762 match.dir = vdirs.append
763 match.bad = fail
763 match.bad = fail
764
764
765 wlock = self.wlock()
765 wlock = self.wlock()
766 try:
766 try:
767 p1, p2 = self.dirstate.parents()
767 p1, p2 = self.dirstate.parents()
768 wctx = self[None]
768 wctx = self[None]
769
769
770 if (not force and p2 != nullid and match and
770 if (not force and p2 != nullid and match and
771 (match.files() or match.anypats())):
771 (match.files() or match.anypats())):
772 raise util.Abort(_('cannot partially commit a merge '
772 raise util.Abort(_('cannot partially commit a merge '
773 '(do not specify files or patterns)'))
773 '(do not specify files or patterns)'))
774
774
775 changes = self.status(match=match, clean=force)
775 changes = self.status(match=match, clean=force)
776 if force:
776 if force:
777 changes[0].extend(changes[6]) # mq may commit unchanged files
777 changes[0].extend(changes[6]) # mq may commit unchanged files
778
778
779 # check subrepos
779 # check subrepos
780 subs = []
780 subs = []
781 for s in wctx.substate:
781 for s in wctx.substate:
782 if match(s) and wctx.sub(s).dirty():
782 if match(s) and wctx.sub(s).dirty():
783 subs.append(s)
783 subs.append(s)
784 if subs and '.hgsubstate' not in changes[0]:
784 if subs and '.hgsubstate' not in changes[0]:
785 changes[0].insert(0, '.hgsubstate')
785 changes[0].insert(0, '.hgsubstate')
786
786
787 # make sure all explicit patterns are matched
787 # make sure all explicit patterns are matched
788 if not force and match.files():
788 if not force and match.files():
789 matched = set(changes[0] + changes[1] + changes[2])
789 matched = set(changes[0] + changes[1] + changes[2])
790
790
791 for f in match.files():
791 for f in match.files():
792 if f == '.' or f in matched or f in wctx.substate:
792 if f == '.' or f in matched or f in wctx.substate:
793 continue
793 continue
794 if f in changes[3]: # missing
794 if f in changes[3]: # missing
795 fail(f, _('file not found!'))
795 fail(f, _('file not found!'))
796 if f in vdirs: # visited directory
796 if f in vdirs: # visited directory
797 d = f + '/'
797 d = f + '/'
798 for mf in matched:
798 for mf in matched:
799 if mf.startswith(d):
799 if mf.startswith(d):
800 break
800 break
801 else:
801 else:
802 fail(f, _("no match under directory!"))
802 fail(f, _("no match under directory!"))
803 elif f not in self.dirstate:
803 elif f not in self.dirstate:
804 fail(f, _("file not tracked!"))
804 fail(f, _("file not tracked!"))
805
805
806 if (not force and not extra.get("close") and p2 == nullid
806 if (not force and not extra.get("close") and p2 == nullid
807 and not (changes[0] or changes[1] or changes[2])
807 and not (changes[0] or changes[1] or changes[2])
808 and self[None].branch() == self['.'].branch()):
808 and self[None].branch() == self['.'].branch()):
809 return None
809 return None
810
810
811 ms = merge_.mergestate(self)
811 ms = merge_.mergestate(self)
812 for f in changes[0]:
812 for f in changes[0]:
813 if f in ms and ms[f] == 'u':
813 if f in ms and ms[f] == 'u':
814 raise util.Abort(_("unresolved merge conflicts "
814 raise util.Abort(_("unresolved merge conflicts "
815 "(see hg resolve)"))
815 "(see hg resolve)"))
816
816
817 cctx = context.workingctx(self, (p1, p2), text, user, date,
817 cctx = context.workingctx(self, (p1, p2), text, user, date,
818 extra, changes)
818 extra, changes)
819 if editor:
819 if editor:
820 cctx._text = editor(self, cctx, subs)
820 cctx._text = editor(self, cctx, subs)
821
821
822 # commit subs
822 # commit subs
823 if subs:
823 if subs:
824 state = wctx.substate.copy()
824 state = wctx.substate.copy()
825 for s in subs:
825 for s in subs:
826 self.ui.status(_('committing subrepository %s\n') % s)
826 self.ui.status(_('committing subrepository %s\n') % s)
827 sr = wctx.sub(s).commit(cctx._text, user, date)
827 sr = wctx.sub(s).commit(cctx._text, user, date)
828 state[s] = (state[s][0], sr)
828 state[s] = (state[s][0], sr)
829 subrepo.writestate(self, state)
829 subrepo.writestate(self, state)
830
830
831 ret = self.commitctx(cctx, True)
831 ret = self.commitctx(cctx, True)
832
832
833 # update dirstate and mergestate
833 # update dirstate and mergestate
834 for f in changes[0] + changes[1]:
834 for f in changes[0] + changes[1]:
835 self.dirstate.normal(f)
835 self.dirstate.normal(f)
836 for f in changes[2]:
836 for f in changes[2]:
837 self.dirstate.forget(f)
837 self.dirstate.forget(f)
838 self.dirstate.setparents(ret)
838 self.dirstate.setparents(ret)
839 ms.reset()
839 ms.reset()
840
840
841 return ret
841 return ret
842
842
843 finally:
843 finally:
844 wlock.release()
844 wlock.release()
845
845
846 def commitctx(self, ctx, error=False):
846 def commitctx(self, ctx, error=False):
847 """Add a new revision to current repository.
847 """Add a new revision to current repository.
848
848
849 Revision information is passed via the context argument.
849 Revision information is passed via the context argument.
850 """
850 """
851
851
852 tr = lock = None
852 tr = lock = None
853 removed = ctx.removed()
853 removed = ctx.removed()
854 p1, p2 = ctx.p1(), ctx.p2()
854 p1, p2 = ctx.p1(), ctx.p2()
855 m1 = p1.manifest().copy()
855 m1 = p1.manifest().copy()
856 m2 = p2.manifest()
856 m2 = p2.manifest()
857 user = ctx.user()
857 user = ctx.user()
858
858
859 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
859 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
860 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
860 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
861
861
862 lock = self.lock()
862 lock = self.lock()
863 try:
863 try:
864 tr = self.transaction()
864 tr = self.transaction()
865 trp = weakref.proxy(tr)
865 trp = weakref.proxy(tr)
866
866
867 # check in files
867 # check in files
868 new = {}
868 new = {}
869 changed = []
869 changed = []
870 linkrev = len(self)
870 linkrev = len(self)
871 for f in sorted(ctx.modified() + ctx.added()):
871 for f in sorted(ctx.modified() + ctx.added()):
872 self.ui.note(f + "\n")
872 self.ui.note(f + "\n")
873 try:
873 try:
874 fctx = ctx[f]
874 fctx = ctx[f]
875 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
875 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
876 changed)
876 changed)
877 m1.set(f, fctx.flags())
877 m1.set(f, fctx.flags())
878 except (OSError, IOError):
878 except (OSError, IOError):
879 if error:
879 if error:
880 self.ui.warn(_("trouble committing %s!\n") % f)
880 self.ui.warn(_("trouble committing %s!\n") % f)
881 raise
881 raise
882 else:
882 else:
883 removed.append(f)
883 removed.append(f)
884
884
885 # update manifest
885 # update manifest
886 m1.update(new)
886 m1.update(new)
887 removed = [f for f in sorted(removed) if f in m1 or f in m2]
887 removed = [f for f in sorted(removed) if f in m1 or f in m2]
888 drop = [f for f in removed if f in m1]
888 drop = [f for f in removed if f in m1]
889 for f in drop:
889 for f in drop:
890 del m1[f]
890 del m1[f]
891 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
891 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
892 p2.manifestnode(), (new, drop))
892 p2.manifestnode(), (new, drop))
893
893
894 # update changelog
894 # update changelog
895 self.changelog.delayupdate()
895 self.changelog.delayupdate()
896 n = self.changelog.add(mn, changed + removed, ctx.description(),
896 n = self.changelog.add(mn, changed + removed, ctx.description(),
897 trp, p1.node(), p2.node(),
897 trp, p1.node(), p2.node(),
898 user, ctx.date(), ctx.extra().copy())
898 user, ctx.date(), ctx.extra().copy())
899 p = lambda: self.changelog.writepending() and self.root or ""
899 p = lambda: self.changelog.writepending() and self.root or ""
900 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
900 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
901 parent2=xp2, pending=p)
901 parent2=xp2, pending=p)
902 self.changelog.finalize(trp)
902 self.changelog.finalize(trp)
903 tr.close()
903 tr.close()
904
904
905 if self.branchcache:
905 if self.branchcache:
906 self.branchtags()
906 self.branchtags()
907
907
908 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
908 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
909 return n
909 return n
910 finally:
910 finally:
911 del tr
911 del tr
912 lock.release()
912 lock.release()
913
913
914 def destroyed(self):
914 def destroyed(self):
915 '''Inform the repository that nodes have been destroyed.
915 '''Inform the repository that nodes have been destroyed.
916 Intended for use by strip and rollback, so there's a common
916 Intended for use by strip and rollback, so there's a common
917 place for anything that has to be done after destroying history.'''
917 place for anything that has to be done after destroying history.'''
918 # Do nothing for now: this is a placeholder that will be used
919 # when we add tag caching.
920 # XXX it might be nice if we could take the list of destroyed
918 # XXX it might be nice if we could take the list of destroyed
921 # nodes, but I don't see an easy way for rollback() to do that
919 # nodes, but I don't see an easy way for rollback() to do that
922 pass
920
921 # Ensure the persistent tag cache is updated. Doing it now
922 # means that the tag cache only has to worry about destroyed
923 # heads immediately after a strip/rollback. That in turn
924 # guarantees that "cachetip == currenttip" (comparing both rev
925 # and node) always means no nodes have been added or destroyed.
926
927 # XXX this is suboptimal when qrefresh'ing: we strip the current
928 # head, refresh the tag cache, then immediately add a new head.
929 # But I think doing it this way is necessary for the "instant
930 # tag cache retrieval" case to work.
931 tags_.findglobaltags(self.ui, self, {}, {})
923
932
924 def walk(self, match, node=None):
933 def walk(self, match, node=None):
925 '''
934 '''
926 walk recursively through the directory tree or a given
935 walk recursively through the directory tree or a given
927 changeset, finding all files matched by the match
936 changeset, finding all files matched by the match
928 function
937 function
929 '''
938 '''
930 return self[node].walk(match)
939 return self[node].walk(match)
931
940
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

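    # Usage sketch (hypothetical caller): the seven lists always come back
    # in the same order, so a typical call looks like
    #
    #     st = repo.status(node1='.', node2=None, clean=True)
    #     modified, added, removed, deleted, unknown, ignored, clean = st
    #
    # unknown/ignored/clean stay empty unless the matching flag is passed.
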
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

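    # (dirstate state letters tested above: 'n' normal, 'a' added,
    # 'r' removed, 'm' merged, '?' untracked.)
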
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

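    # Usage sketch (hypothetical): newest-first open heads of the working
    # directory's branch:
    #
    #     bheads = repo.branchheads()   # branch=None -> current branch
    #     newest = bheads and bheads[0] or None
    #
    # The 'close' test above reads the extra dict (changelog entry
    # field 5) that close-branch commits carry.
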
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

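    # Worked example (hypothetical linear history a0..a9): for the pair
    # (a9, a0), the loop above samples ancestors 1, 2, 4 and 8 steps
    # below the top, returning [a8, a7, a5, a1]. That exponential spacing
    # is what lets findcommonincoming() bisect long unknown branches in
    # O(log n) round trips.
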
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but none of whose children exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but none of whose children exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

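    # (Protocol shape, for orientation: typically one remote.heads()
    # call, then batched remote.branches() requests -- ten lookups per
    # round -- to walk linear segments, then remote.between() rounds
    # that bisect each partially-known segment; reqcnt counts the
    # round trips.)
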
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

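    # (prepush return convention, relied on by both push paths below:
    #  (None, 0) -> push refused (would create new remote heads/branch),
    #  (None, 1) -> nothing to push,
    #  (changegroup, remote_heads) -> ready to transmit.)
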
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

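        # extranodes shape, for reference (names are illustrative):
        #
        #     extranodes = {1: [(manifest_node, link_node)],
        #                   'some/file': [(file_node, link_node)]}
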
        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and a total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

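        # (next_rev is a one-element list because this is Python 2 code
        # with no 'nonlocal': mutating the list lets the nested closure
        # remember the last manifest rev seen, so manifests arriving in
        # consecutive rev order take the cheap readdelta() path instead
        # of a full read().)
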
1772 # We have a list of filenodes we think we need for a file, lets remove
1781 # We have a list of filenodes we think we need for a file, lets remove
1773 # all those we know the recipient must have.
1782 # all those we know the recipient must have.
1774 def prune_filenodes(f, filerevlog):
1783 def prune_filenodes(f, filerevlog):
1775 msngset = msng_filenode_set[f]
1784 msngset = msng_filenode_set[f]
1776 hasset = set()
1785 hasset = set()
1777 # If a 'missing' filenode thinks it belongs to a changenode we
1786 # If a 'missing' filenode thinks it belongs to a changenode we
1778 # assume the recipient must have, then the recipient must have
1787 # assume the recipient must have, then the recipient must have
1779 # that filenode.
1788 # that filenode.
1780 for n in msngset:
1789 for n in msngset:
1781 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1790 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1782 if clnode in has_cl_set:
1791 if clnode in has_cl_set:
1783 hasset.add(n)
1792 hasset.add(n)
1784 prune_parents(filerevlog, hasset, msngset)
1793 prune_parents(filerevlog, hasset, msngset)
1785
1794
1786 # A function generator function that sets up the a context for the
1795 # A function generator function that sets up the a context for the
1787 # inner function.
1796 # inner function.
1788 def lookup_filenode_link_func(fname):
1797 def lookup_filenode_link_func(fname):
1789 msngset = msng_filenode_set[fname]
1798 msngset = msng_filenode_set[fname]
1790 # Lookup the changenode the filenode belongs to.
1799 # Lookup the changenode the filenode belongs to.
1791 def lookup_filenode_link(fnode):
1800 def lookup_filenode_link(fnode):
1792 return msngset[fnode]
1801 return msngset[fnode]
1793 return lookup_filenode_link
1802 return lookup_filenode_link
1794
1803
1795 # Add the nodes that were explicitly requested.
1804 # Add the nodes that were explicitly requested.
1796 def add_extra_nodes(name, nodes):
1805 def add_extra_nodes(name, nodes):
1797 if not extranodes or name not in extranodes:
1806 if not extranodes or name not in extranodes:
1798 return
1807 return
1799
1808
1800 for node, linknode in extranodes[name]:
1809 for node, linknode in extranodes[name]:
1801 if node not in nodes:
1810 if node not in nodes:
1802 nodes[node] = linknode
1811 nodes[node] = linknode
1803
1812
1804 # Now that we have all theses utility functions to help out and
1813 # Now that we have all theses utility functions to help out and
1805 # logically divide up the task, generate the group.
1814 # logically divide up the task, generate the group.
1806 def gengroup():
1815 def gengroup():
1807 # The set of changed files starts empty.
1816 # The set of changed files starts empty.
1808 changedfiles = {}
1817 changedfiles = {}
1809 # Create a changenode group generator that will call our functions
1818 # Create a changenode group generator that will call our functions
1810 # back to lookup the owning changenode and collect information.
1819 # back to lookup the owning changenode and collect information.
1811 group = cl.group(msng_cl_lst, identity,
1820 group = cl.group(msng_cl_lst, identity,
1812 manifest_and_file_collector(changedfiles))
1821 manifest_and_file_collector(changedfiles))
1813 for chnk in group:
1822 for chnk in group:
1814 yield chnk
1823 yield chnk
1815
1824
1816 # The list of manifests has been collected by the generator
1825 # The list of manifests has been collected by the generator
1817 # calling our functions back.
1826 # calling our functions back.
1818 prune_manifests()
1827 prune_manifests()
1819 add_extra_nodes(1, msng_mnfst_set)
1828 add_extra_nodes(1, msng_mnfst_set)
1820 msng_mnfst_lst = msng_mnfst_set.keys()
1829 msng_mnfst_lst = msng_mnfst_set.keys()
1821 # Sort the manifestnodes by revision number.
1830 # Sort the manifestnodes by revision number.
1822 msng_mnfst_lst.sort(key=mnfst.rev)
1831 msng_mnfst_lst.sort(key=mnfst.rev)
1823 # Create a generator for the manifestnodes that calls our lookup
1832 # Create a generator for the manifestnodes that calls our lookup
1824 # and data collection functions back.
1833 # and data collection functions back.
1825 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1834 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1826 filenode_collector(changedfiles))
1835 filenode_collector(changedfiles))
1827 for chnk in group:
1836 for chnk in group:
1828 yield chnk
1837 yield chnk
1829
1838
1830 # These are no longer needed; dereference and toss the memory for
1839 # These are no longer needed; dereference and toss the memory for
1831 # them.
1840 # them.
1832 msng_mnfst_lst = None
1841 msng_mnfst_lst = None
1833 msng_mnfst_set.clear()
1842 msng_mnfst_set.clear()
1834
1843
1835 if extranodes:
1844 if extranodes:
1836 for fname in extranodes:
1845 for fname in extranodes:
1837 if isinstance(fname, int):
1846 if isinstance(fname, int):
1838 continue
1847 continue
1839 msng_filenode_set.setdefault(fname, {})
1848 msng_filenode_set.setdefault(fname, {})
1840 changedfiles[fname] = 1
1849 changedfiles[fname] = 1
1841 # Go through all our files in order sorted by name.
1850 # Go through all our files in order sorted by name.
1842 for fname in sorted(changedfiles):
1851 for fname in sorted(changedfiles):
1843 filerevlog = self.file(fname)
1852 filerevlog = self.file(fname)
1844 if not len(filerevlog):
1853 if not len(filerevlog):
1845 raise util.Abort(_("empty or missing revlog for %s") % fname)
1854 raise util.Abort(_("empty or missing revlog for %s") % fname)
1846 # Toss out the filenodes that the recipient isn't really
1855 # Toss out the filenodes that the recipient isn't really
1847 # missing.
1856 # missing.
1848 if fname in msng_filenode_set:
1857 if fname in msng_filenode_set:
1849 prune_filenodes(fname, filerevlog)
1858 prune_filenodes(fname, filerevlog)
1850 add_extra_nodes(fname, msng_filenode_set[fname])
1859 add_extra_nodes(fname, msng_filenode_set[fname])
1851 msng_filenode_lst = msng_filenode_set[fname].keys()
1860 msng_filenode_lst = msng_filenode_set[fname].keys()
1852 else:
1861 else:
1853 msng_filenode_lst = []
1862 msng_filenode_lst = []
1854 # If any filenodes are left, generate the group for them,
1863 # If any filenodes are left, generate the group for them,
1855 # otherwise don't bother.
1864 # otherwise don't bother.
1856 if len(msng_filenode_lst) > 0:
1865 if len(msng_filenode_lst) > 0:
1857 yield changegroup.chunkheader(len(fname))
1866 yield changegroup.chunkheader(len(fname))
1858 yield fname
1867 yield fname
1859 # Sort the filenodes by their revision #
1868 # Sort the filenodes by their revision #
1860 msng_filenode_lst.sort(key=filerevlog.rev)
1869 msng_filenode_lst.sort(key=filerevlog.rev)
1861 # Create a group generator and only pass in a changenode
1870 # Create a group generator and only pass in a changenode
1862 # lookup function, since we don't need to collect information
1871 # lookup function, since we don't need to collect information
1863 # from filenodes.
1872 # from filenodes.
1864 group = filerevlog.group(msng_filenode_lst,
1873 group = filerevlog.group(msng_filenode_lst,
1865 lookup_filenode_link_func(fname))
1874 lookup_filenode_link_func(fname))
1866 for chnk in group:
1875 for chnk in group:
1867 yield chnk
1876 yield chnk
1868 if fname in msng_filenode_set:
1877 if fname in msng_filenode_set:
1869 # Don't need this anymore, toss it to free memory.
1878 # Don't need this anymore, toss it to free memory.
1870 del msng_filenode_set[fname]
1879 del msng_filenode_set[fname]
1871 # Signal that no more groups are left.
1880 # Signal that no more groups are left.
1872 yield changegroup.closechunk()
1881 yield changegroup.closechunk()
1873
1882
1874 if msng_cl_lst:
1883 if msng_cl_lst:
1875 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1884 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1876
1885
1877 return util.chunkbuffer(gengroup())
1886 return util.chunkbuffer(gengroup())
1878
1887
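
(For readers tracing the generators above: changegroup data is a flat byte
stream of length-prefixed chunks, and an empty chunk closes a group. The
sketch below is illustrative only -- it assumes the framing produced by
changegroup.chunkheader() and changegroup.closechunk(), where the 4-byte
big-endian length includes the header itself.)

    import struct

    def frame_group(payloads):
        # Frame payload strings the way gengroup() emits them: each chunk is
        # a ">l" length (counting its own 4 header bytes) followed by the
        # payload; a zero length marks the end of the group.
        for data in payloads:
            yield struct.pack(">l", len(data) + 4)
            yield data
        yield struct.pack(">l", 0)  # the equivalent of closechunk()
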
1879 def changegroup(self, basenodes, source):
1888 def changegroup(self, basenodes, source):
1880 # to avoid a race we use changegroupsubset() (issue1320)
1889 # to avoid a race we use changegroupsubset() (issue1320)
1881 return self.changegroupsubset(basenodes, self.heads(), source)
1890 return self.changegroupsubset(basenodes, self.heads(), source)
1882
1891
1883 def _changegroup(self, common, source):
1892 def _changegroup(self, common, source):
1884 """Generate a changegroup of all nodes that we have that a recipient
1893 """Generate a changegroup of all nodes that we have that a recipient
1885 doesn't.
1894 doesn't.
1886
1895
1887 This is much easier than the previous function as we can assume that
1896 This is much easier than the previous function as we can assume that
1888 the recipient has any changenode we aren't sending them.
1897 the recipient has any changenode we aren't sending them.
1889
1898
1890 common is the set of common nodes between remote and self"""
1899 common is the set of common nodes between remote and self"""
1891
1900
1892 self.hook('preoutgoing', throw=True, source=source)
1901 self.hook('preoutgoing', throw=True, source=source)
1893
1902
1894 cl = self.changelog
1903 cl = self.changelog
1895 nodes = cl.findmissing(common)
1904 nodes = cl.findmissing(common)
1896 revset = set([cl.rev(n) for n in nodes])
1905 revset = set([cl.rev(n) for n in nodes])
1897 self.changegroupinfo(nodes, source)
1906 self.changegroupinfo(nodes, source)
1898
1907
1899 def identity(x):
1908 def identity(x):
1900 return x
1909 return x
1901
1910
1902 def gennodelst(log):
1911 def gennodelst(log):
1903 for r in log:
1912 for r in log:
1904 if log.linkrev(r) in revset:
1913 if log.linkrev(r) in revset:
1905 yield log.node(r)
1914 yield log.node(r)
1906
1915
1907 def changed_file_collector(changedfileset):
1916 def changed_file_collector(changedfileset):
1908 def collect_changed_files(clnode):
1917 def collect_changed_files(clnode):
1909 c = cl.read(clnode)
1918 c = cl.read(clnode)
1910 changedfileset.update(c[3])
1919 changedfileset.update(c[3])
1911 return collect_changed_files
1920 return collect_changed_files
1912
1921
1913 def lookuprevlink_func(revlog):
1922 def lookuprevlink_func(revlog):
1914 def lookuprevlink(n):
1923 def lookuprevlink(n):
1915 return cl.node(revlog.linkrev(revlog.rev(n)))
1924 return cl.node(revlog.linkrev(revlog.rev(n)))
1916 return lookuprevlink
1925 return lookuprevlink
1917
1926
1918 def gengroup():
1927 def gengroup():
1919 # construct a list of all changed files
1928 # construct a list of all changed files
1920 changedfiles = set()
1929 changedfiles = set()
1921
1930
1922 for chnk in cl.group(nodes, identity,
1931 for chnk in cl.group(nodes, identity,
1923 changed_file_collector(changedfiles)):
1932 changed_file_collector(changedfiles)):
1924 yield chnk
1933 yield chnk
1925
1934
1926 mnfst = self.manifest
1935 mnfst = self.manifest
1927 nodeiter = gennodelst(mnfst)
1936 nodeiter = gennodelst(mnfst)
1928 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1937 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1929 yield chnk
1938 yield chnk
1930
1939
1931 for fname in sorted(changedfiles):
1940 for fname in sorted(changedfiles):
1932 filerevlog = self.file(fname)
1941 filerevlog = self.file(fname)
1933 if not len(filerevlog):
1942 if not len(filerevlog):
1934 raise util.Abort(_("empty or missing revlog for %s") % fname)
1943 raise util.Abort(_("empty or missing revlog for %s") % fname)
1935 nodeiter = gennodelst(filerevlog)
1944 nodeiter = gennodelst(filerevlog)
1936 nodeiter = list(nodeiter)
1945 nodeiter = list(nodeiter)
1937 if nodeiter:
1946 if nodeiter:
1938 yield changegroup.chunkheader(len(fname))
1947 yield changegroup.chunkheader(len(fname))
1939 yield fname
1948 yield fname
1940 lookup = lookuprevlink_func(filerevlog)
1949 lookup = lookuprevlink_func(filerevlog)
1941 for chnk in filerevlog.group(nodeiter, lookup):
1950 for chnk in filerevlog.group(nodeiter, lookup):
1942 yield chnk
1951 yield chnk
1943
1952
1944 yield changegroup.closechunk()
1953 yield changegroup.closechunk()
1945
1954
1946 if nodes:
1955 if nodes:
1947 self.hook('outgoing', node=hex(nodes[0]), source=source)
1956 self.hook('outgoing', node=hex(nodes[0]), source=source)
1948
1957
1949 return util.chunkbuffer(gengroup())
1958 return util.chunkbuffer(gengroup())
1950
1959
1951 def addchangegroup(self, source, srctype, url, emptyok=False):
1960 def addchangegroup(self, source, srctype, url, emptyok=False):
1952 """add changegroup to repo.
1961 """add changegroup to repo.
1953
1962
1954 return values:
1963 return values:
1955 - nothing changed or no source: 0
1964 - nothing changed or no source: 0
1956 - more heads than before: 1+added heads (2..n)
1965 - more heads than before: 1+added heads (2..n)
1957 - fewer heads than before: -1-removed heads (-2..-n)
1966 - fewer heads than before: -1-removed heads (-2..-n)
1958 - number of heads stays the same: 1
1967 - number of heads stays the same: 1
1959 """
1968 """
1960 def csmap(x):
1969 def csmap(x):
1961 self.ui.debug(_("add changeset %s\n") % short(x))
1970 self.ui.debug(_("add changeset %s\n") % short(x))
1962 return len(cl)
1971 return len(cl)
1963
1972
1964 def revmap(x):
1973 def revmap(x):
1965 return cl.rev(x)
1974 return cl.rev(x)
1966
1975
1967 if not source:
1976 if not source:
1968 return 0
1977 return 0
1969
1978
1970 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1979 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1971
1980
1972 changesets = files = revisions = 0
1981 changesets = files = revisions = 0
1973
1982
1974 # write changelog data to temp files so concurrent readers will not see
1983 # write changelog data to temp files so concurrent readers will not see
1975 # an inconsistent view
1984 # an inconsistent view
1976 cl = self.changelog
1985 cl = self.changelog
1977 cl.delayupdate()
1986 cl.delayupdate()
1978 oldheads = len(cl.heads())
1987 oldheads = len(cl.heads())
1979
1988
1980 tr = self.transaction()
1989 tr = self.transaction()
1981 try:
1990 try:
1982 trp = weakref.proxy(tr)
1991 trp = weakref.proxy(tr)
1983 # pull off the changeset group
1992 # pull off the changeset group
1984 self.ui.status(_("adding changesets\n"))
1993 self.ui.status(_("adding changesets\n"))
1985 clstart = len(cl)
1994 clstart = len(cl)
1986 chunkiter = changegroup.chunkiter(source)
1995 chunkiter = changegroup.chunkiter(source)
1987 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1996 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1988 raise util.Abort(_("received changelog group is empty"))
1997 raise util.Abort(_("received changelog group is empty"))
1989 clend = len(cl)
1998 clend = len(cl)
1990 changesets = clend - clstart
1999 changesets = clend - clstart
1991
2000
1992 # pull off the manifest group
2001 # pull off the manifest group
1993 self.ui.status(_("adding manifests\n"))
2002 self.ui.status(_("adding manifests\n"))
1994 chunkiter = changegroup.chunkiter(source)
2003 chunkiter = changegroup.chunkiter(source)
1995 # no need to check for empty manifest group here:
2004 # no need to check for empty manifest group here:
1996 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2005 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1997 # no new manifest will be created and the manifest group will
2006 # no new manifest will be created and the manifest group will
1998 # be empty during the pull
2007 # be empty during the pull
1999 self.manifest.addgroup(chunkiter, revmap, trp)
2008 self.manifest.addgroup(chunkiter, revmap, trp)
2000
2009
2001 # process the files
2010 # process the files
2002 self.ui.status(_("adding file changes\n"))
2011 self.ui.status(_("adding file changes\n"))
2003 while True:
2012 while True:
2004 f = changegroup.getchunk(source)
2013 f = changegroup.getchunk(source)
2005 if not f:
2014 if not f:
2006 break
2015 break
2007 self.ui.debug(_("adding %s revisions\n") % f)
2016 self.ui.debug(_("adding %s revisions\n") % f)
2008 fl = self.file(f)
2017 fl = self.file(f)
2009 o = len(fl)
2018 o = len(fl)
2010 chunkiter = changegroup.chunkiter(source)
2019 chunkiter = changegroup.chunkiter(source)
2011 if fl.addgroup(chunkiter, revmap, trp) is None:
2020 if fl.addgroup(chunkiter, revmap, trp) is None:
2012 raise util.Abort(_("received file revlog group is empty"))
2021 raise util.Abort(_("received file revlog group is empty"))
2013 revisions += len(fl) - o
2022 revisions += len(fl) - o
2014 files += 1
2023 files += 1
2015
2024
2016 newheads = len(cl.heads())
2025 newheads = len(cl.heads())
2017 heads = ""
2026 heads = ""
2018 if oldheads and newheads != oldheads:
2027 if oldheads and newheads != oldheads:
2019 heads = _(" (%+d heads)") % (newheads - oldheads)
2028 heads = _(" (%+d heads)") % (newheads - oldheads)
2020
2029
2021 self.ui.status(_("added %d changesets"
2030 self.ui.status(_("added %d changesets"
2022 " with %d changes to %d files%s\n")
2031 " with %d changes to %d files%s\n")
2023 % (changesets, revisions, files, heads))
2032 % (changesets, revisions, files, heads))
2024
2033
2025 if changesets > 0:
2034 if changesets > 0:
2026 p = lambda: cl.writepending() and self.root or ""
2035 p = lambda: cl.writepending() and self.root or ""
2027 self.hook('pretxnchangegroup', throw=True,
2036 self.hook('pretxnchangegroup', throw=True,
2028 node=hex(cl.node(clstart)), source=srctype,
2037 node=hex(cl.node(clstart)), source=srctype,
2029 url=url, pending=p)
2038 url=url, pending=p)
2030
2039
2031 # make changelog see real files again
2040 # make changelog see real files again
2032 cl.finalize(trp)
2041 cl.finalize(trp)
2033
2042
2034 tr.close()
2043 tr.close()
2035 finally:
2044 finally:
2036 del tr
2045 del tr
2037
2046
2038 if changesets > 0:
2047 if changesets > 0:
2039 # forcefully update the on-disk branch cache
2048 # forcefully update the on-disk branch cache
2040 self.ui.debug(_("updating the branch cache\n"))
2049 self.ui.debug(_("updating the branch cache\n"))
2041 self.branchtags()
2050 self.branchtags()
2042 self.hook("changegroup", node=hex(cl.node(clstart)),
2051 self.hook("changegroup", node=hex(cl.node(clstart)),
2043 source=srctype, url=url)
2052 source=srctype, url=url)
2044
2053
2045 for i in xrange(clstart, clend):
2054 for i in xrange(clstart, clend):
2046 self.hook("incoming", node=hex(cl.node(i)),
2055 self.hook("incoming", node=hex(cl.node(i)),
2047 source=srctype, url=url)
2056 source=srctype, url=url)
2048
2057
2049 # never return 0 here:
2058 # never return 0 here:
2050 if newheads < oldheads:
2059 if newheads < oldheads:
2051 return newheads - oldheads - 1
2060 return newheads - oldheads - 1
2052 else:
2061 else:
2053 return newheads - oldheads + 1
2062 return newheads - oldheads + 1
2054
2063
2055
2064
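
(To make the return-value encoding above concrete, a minimal illustration --
not part of the changeset -- of how the head counts map to result codes:)

    def headcount_code(oldheads, newheads):
        # never return 0: callers reserve 0 for "nothing changed or no source"
        if newheads < oldheads:
            return newheads - oldheads - 1  # e.g. 3 heads -> 2 heads gives -2
        return newheads - oldheads + 1      # e.g. 2 -> 4 gives 3; 2 -> 2 gives 1
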
2056 def stream_in(self, remote):
2065 def stream_in(self, remote):
2057 fp = remote.stream_out()
2066 fp = remote.stream_out()
2058 l = fp.readline()
2067 l = fp.readline()
2059 try:
2068 try:
2060 resp = int(l)
2069 resp = int(l)
2061 except ValueError:
2070 except ValueError:
2062 raise error.ResponseError(
2071 raise error.ResponseError(
2063 _('Unexpected response from remote server:'), l)
2072 _('Unexpected response from remote server:'), l)
2064 if resp == 1:
2073 if resp == 1:
2065 raise util.Abort(_('operation forbidden by server'))
2074 raise util.Abort(_('operation forbidden by server'))
2066 elif resp == 2:
2075 elif resp == 2:
2067 raise util.Abort(_('locking the remote repository failed'))
2076 raise util.Abort(_('locking the remote repository failed'))
2068 elif resp != 0:
2077 elif resp != 0:
2069 raise util.Abort(_('the server sent an unknown error code'))
2078 raise util.Abort(_('the server sent an unknown error code'))
2070 self.ui.status(_('streaming all changes\n'))
2079 self.ui.status(_('streaming all changes\n'))
2071 l = fp.readline()
2080 l = fp.readline()
2072 try:
2081 try:
2073 total_files, total_bytes = map(int, l.split(' ', 1))
2082 total_files, total_bytes = map(int, l.split(' ', 1))
2074 except (ValueError, TypeError):
2083 except (ValueError, TypeError):
2075 raise error.ResponseError(
2084 raise error.ResponseError(
2076 _('Unexpected response from remote server:'), l)
2085 _('Unexpected response from remote server:'), l)
2077 self.ui.status(_('%d files to transfer, %s of data\n') %
2086 self.ui.status(_('%d files to transfer, %s of data\n') %
2078 (total_files, util.bytecount(total_bytes)))
2087 (total_files, util.bytecount(total_bytes)))
2079 start = time.time()
2088 start = time.time()
2080 for i in xrange(total_files):
2089 for i in xrange(total_files):
2081 # XXX doesn't support '\n' or '\r' in filenames
2090 # XXX doesn't support '\n' or '\r' in filenames
2082 l = fp.readline()
2091 l = fp.readline()
2083 try:
2092 try:
2084 name, size = l.split('\0', 1)
2093 name, size = l.split('\0', 1)
2085 size = int(size)
2094 size = int(size)
2086 except (ValueError, TypeError):
2095 except (ValueError, TypeError):
2087 raise error.ResponseError(
2096 raise error.ResponseError(
2088 _('Unexpected response from remote server:'), l)
2097 _('Unexpected response from remote server:'), l)
2089 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2098 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2090 # for backwards compat, name was partially encoded
2099 # for backwards compat, name was partially encoded
2091 ofp = self.sopener(store.decodedir(name), 'w')
2100 ofp = self.sopener(store.decodedir(name), 'w')
2092 for chunk in util.filechunkiter(fp, limit=size):
2101 for chunk in util.filechunkiter(fp, limit=size):
2093 ofp.write(chunk)
2102 ofp.write(chunk)
2094 ofp.close()
2103 ofp.close()
2095 elapsed = time.time() - start
2104 elapsed = time.time() - start
2096 if elapsed <= 0:
2105 if elapsed <= 0:
2097 elapsed = 0.001
2106 elapsed = 0.001
2098 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2107 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2099 (util.bytecount(total_bytes), elapsed,
2108 (util.bytecount(total_bytes), elapsed,
2100 util.bytecount(total_bytes / elapsed)))
2109 util.bytecount(total_bytes / elapsed)))
2101 self.invalidate()
2110 self.invalidate()
2102 return len(self.heads()) + 1
2111 return len(self.heads()) + 1
2103
2112
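
(A compact restatement of the wire format stream_in() just consumed -- an
illustrative sketch, not the real client: a status line, a
'total_files total_bytes' line, then per file a 'name\0size' line followed by
exactly size bytes of raw store data.)

    def parse_stream(fp):
        status = int(fp.readline())  # 0 = ok, 1 = forbidden, 2 = lock failed
        if status != 0:
            raise ValueError('server refused stream: %d' % status)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))  # raw contents of one store file
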
2104 def clone(self, remote, heads=[], stream=False):
2113 def clone(self, remote, heads=[], stream=False):
2105 '''clone remote repository.
2114 '''clone remote repository.
2106
2115
2107 keyword arguments:
2116 keyword arguments:
2108 heads: list of revs to clone (forces use of pull)
2117 heads: list of revs to clone (forces use of pull)
2109 stream: use streaming clone if possible'''
2118 stream: use streaming clone if possible'''
2110
2119
2111 # now, all clients that can request uncompressed clones can
2120 # now, all clients that can request uncompressed clones can
2112 # read repo formats supported by all servers that can serve
2121 # read repo formats supported by all servers that can serve
2113 # them.
2122 # them.
2114
2123
2115 # if revlog format changes, client will have to check version
2124 # if revlog format changes, client will have to check version
2116 # and format flags on "stream" capability, and use
2125 # and format flags on "stream" capability, and use
2117 # uncompressed only if compatible.
2126 # uncompressed only if compatible.
2118
2127
2119 if stream and not heads and remote.capable('stream'):
2128 if stream and not heads and remote.capable('stream'):
2120 return self.stream_in(remote)
2129 return self.stream_in(remote)
2121 return self.pull(remote, heads)
2130 return self.pull(remote, heads)
2122
2131
2123 # used to avoid circular references so destructors work
2132 # used to avoid circular references so destructors work
2124 def aftertrans(files):
2133 def aftertrans(files):
2125 renamefiles = [tuple(t) for t in files]
2134 renamefiles = [tuple(t) for t in files]
2126 def a():
2135 def a():
2127 for src, dest in renamefiles:
2136 for src, dest in renamefiles:
2128 util.rename(src, dest)
2137 util.rename(src, dest)
2129 return a
2138 return a
2130
2139
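
(A tiny usage illustration of the closure above; the file names are
hypothetical. The caller builds the renamer while the transaction is open and
invokes it after commit, and because only plain tuples are captured, nothing
here keeps the repo alive:)

    renamer = aftertrans([('.hg/store/journal', '.hg/store/undo')])
    # ... transaction closes ...
    renamer()  # performs the queued renames
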
2131 def instance(ui, path, create):
2140 def instance(ui, path, create):
2132 return localrepository(ui, util.drop_scheme('file', path), create)
2141 return localrepository(ui, util.drop_scheme('file', path), create)
2133
2142
2134 def islocal(path):
2143 def islocal(path):
2135 return True
2144 return True
@@ -1,122 +1,310 b''
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2, incorporated herein by reference.
7 # GNU General Public License version 2, incorporated herein by reference.
8
8
9 # Currently this module only deals with reading tags. Soon it will grow
9 # Currently this module only deals with reading and caching tags.
10 # support for caching tag info. Eventually, it could take care of
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # updating (adding/removing/moving) tags too.
11 # tags too.
12
12
13 from node import bin, hex
13 import os
14 from node import nullid, bin, hex, short
14 from i18n import _
15 from i18n import _
15 import encoding
16 import encoding
16 import error
17 import error
17
18
18 def findglobaltags(ui, repo, alltags, tagtypes):
19 def _debugalways(ui, *msg):
20 ui.write(*msg)
21
22 def _debugconditional(ui, *msg):
23 ui.debug(*msg)
24
25 def _debugnever(ui, *msg):
26 pass
27
28 _debug = _debugalways
29 _debug = _debugnever
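# note (editorial, not in the patch): the second assignment wins, so _debug
# is a no-op by default; remove the _debugnever line to re-enable tracing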
30
31 def findglobaltags1(ui, repo, alltags, tagtypes):
19 '''Find global tags in repo by reading .hgtags from every head that
32 '''Find global tags in repo by reading .hgtags from every head that
20 has a distinct version of it. Updates the dicts alltags, tagtypes
33 has a distinct version of it. Updates the dicts alltags, tagtypes
21 in place: alltags maps tag name to (node, hist) pair (see _readtags()
34 in place: alltags maps tag name to (node, hist) pair (see _readtags()
22 below), and tagtypes maps tag name to tag type ('global' in this
35 below), and tagtypes maps tag name to tag type ('global' in this
23 case).'''
36 case).'''
24
37
25 seen = set()
38 seen = set()
26 fctx = None
39 fctx = None
27 ctxs = [] # list of filectx
40 ctxs = [] # list of filectx
28 for node in repo.heads():
41 for node in repo.heads():
29 try:
42 try:
30 fnode = repo[node].filenode('.hgtags')
43 fnode = repo[node].filenode('.hgtags')
31 except error.LookupError:
44 except error.LookupError:
32 continue
45 continue
33 if fnode not in seen:
46 if fnode not in seen:
34 seen.add(fnode)
47 seen.add(fnode)
35 if not fctx:
48 if not fctx:
36 fctx = repo.filectx('.hgtags', fileid=fnode)
49 fctx = repo.filectx('.hgtags', fileid=fnode)
37 else:
50 else:
38 fctx = fctx.filectx(fnode)
51 fctx = fctx.filectx(fnode)
39 ctxs.append(fctx)
52 ctxs.append(fctx)
40
53
41 # read the tags file from each head, ending with the tip
54 # read the tags file from each head, ending with the tip
42 for fctx in reversed(ctxs):
55 for fctx in reversed(ctxs):
43 filetags = _readtags(
56 filetags = _readtags(
44 ui, repo, fctx.data().splitlines(), fctx)
57 ui, repo, fctx.data().splitlines(), fctx)
45 _updatetags(filetags, "global", alltags, tagtypes)
58 _updatetags(filetags, "global", alltags, tagtypes)
46
59
60 def findglobaltags2(ui, repo, alltags, tagtypes):
61 '''Same as findglobaltags1(), but with caching.'''
62 (heads, tagfnode, shouldwrite) = _readtagcache(ui, repo)
63
64 _debug(ui, "reading tags from %d head(s): %s\n"
65 % (len(heads), map(short, reversed(heads))))
66 seen = set() # set of fnode
67 fctx = None
68 for head in reversed(heads): # oldest to newest
69 assert head in repo.changelog.nodemap, \
70 "tag cache returned bogus head %s" % short(head)
71
72 fnode = tagfnode.get(head)
73 if fnode and fnode not in seen:
74 seen.add(fnode)
75 if not fctx:
76 fctx = repo.filectx('.hgtags', fileid=fnode)
77 else:
78 fctx = fctx.filectx(fnode)
79
80 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
81 _updatetags(filetags, 'global', alltags, tagtypes)
82
83 # and update the cache (if necessary)
84 if shouldwrite:
85 _writetagcache(ui, repo, heads, tagfnode)
86
87 # Set this to findglobaltags1 to disable tag caching.
88 findglobaltags = findglobaltags2
89
47 def readlocaltags(ui, repo, alltags, tagtypes):
90 def readlocaltags(ui, repo, alltags, tagtypes):
48 '''Read local tags in repo. Update alltags and tagtypes.'''
91 '''Read local tags in repo. Update alltags and tagtypes.'''
49 try:
92 try:
50 data = encoding.fromlocal(repo.opener("localtags").read())
93 data = encoding.fromlocal(repo.opener("localtags").read())
51 # localtags are stored in the local character set
94 # localtags are stored in the local character set
52 # while the internal tag table is stored in UTF-8
95 # while the internal tag table is stored in UTF-8
53 filetags = _readtags(
96 filetags = _readtags(
54 ui, repo, data.splitlines(), "localtags")
97 ui, repo, data.splitlines(), "localtags")
55 _updatetags(filetags, "local", alltags, tagtypes)
98 _updatetags(filetags, "local", alltags, tagtypes)
56 except IOError:
99 except IOError:
57 pass
100 pass
58
101
59 def _readtags(ui, repo, lines, fn):
102 def _readtags(ui, repo, lines, fn):
60 '''Read tag definitions from a file (or any source of lines).
103 '''Read tag definitions from a file (or any source of lines).
61 Return a mapping from tag name to (node, hist): node is the node id
104 Return a mapping from tag name to (node, hist): node is the node id
62 from the last line read for that name, and hist is the list of node
105 from the last line read for that name, and hist is the list of node
63 ids previously associated with it (in file order). All node ids are
106 ids previously associated with it (in file order). All node ids are
64 binary, not hex.'''
107 binary, not hex.'''
65
108
66 filetags = {} # map tag name to (node, hist)
109 filetags = {} # map tag name to (node, hist)
67 count = 0
110 count = 0
68
111
69 def warn(msg):
112 def warn(msg):
70 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
113 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
71
114
72 for line in lines:
115 for line in lines:
73 count += 1
116 count += 1
74 if not line:
117 if not line:
75 continue
118 continue
76 try:
119 try:
77 (nodehex, name) = line.split(" ", 1)
120 (nodehex, name) = line.split(" ", 1)
78 except ValueError:
121 except ValueError:
79 warn(_("cannot parse entry"))
122 warn(_("cannot parse entry"))
80 continue
123 continue
81 name = encoding.tolocal(name.strip()) # stored in UTF-8
124 name = encoding.tolocal(name.strip()) # stored in UTF-8
82 try:
125 try:
83 nodebin = bin(nodehex)
126 nodebin = bin(nodehex)
84 except TypeError:
127 except TypeError:
85 warn(_("node '%s' is not well formed") % nodehex)
128 warn(_("node '%s' is not well formed") % nodehex)
86 continue
129 continue
87 if nodebin not in repo.changelog.nodemap:
130 if nodebin not in repo.changelog.nodemap:
88 # silently ignore as pull -r might cause this
131 # silently ignore as pull -r might cause this
89 continue
132 continue
90
133
91 # update filetags
134 # update filetags
92 hist = []
135 hist = []
93 if name in filetags:
136 if name in filetags:
94 n, hist = filetags[name]
137 n, hist = filetags[name]
95 hist.append(n)
138 hist.append(n)
96 filetags[name] = (nodebin, hist)
139 filetags[name] = (nodebin, hist)
97 return filetags
140 return filetags
98
141
99 def _updatetags(filetags, tagtype, alltags, tagtypes):
142 def _updatetags(filetags, tagtype, alltags, tagtypes):
100 '''Incorporate the tag info read from one file into the two
143 '''Incorporate the tag info read from one file into the two
101 dictionaries, alltags and tagtypes, that contain all tag
144 dictionaries, alltags and tagtypes, that contain all tag
102 info (global across all heads plus local).'''
145 info (global across all heads plus local).'''
103
146
104 for name, nodehist in filetags.iteritems():
147 for name, nodehist in filetags.iteritems():
105 if name not in alltags:
148 if name not in alltags:
106 alltags[name] = nodehist
149 alltags[name] = nodehist
107 tagtypes[name] = tagtype
150 tagtypes[name] = tagtype
108 continue
151 continue
109
152
110 # we prefer alltags[name] if:
153 # we prefer alltags[name] if:
111 # it supersedes us OR
154 # it supersedes us OR
112 # mutual supersedes and it has a higher rank
155 # mutual supersedes and it has a higher rank
113 # otherwise we win because we're tip-most
156 # otherwise we win because we're tip-most
114 anode, ahist = nodehist
157 anode, ahist = nodehist
115 bnode, bhist = alltags[name]
158 bnode, bhist = alltags[name]
116 if (bnode != anode and anode in bhist and
159 if (bnode != anode and anode in bhist and
117 (bnode not in ahist or len(bhist) > len(ahist))):
160 (bnode not in ahist or len(bhist) > len(ahist))):
118 anode = bnode
161 anode = bnode
119 ahist.extend([n for n in bhist if n not in ahist])
162 ahist.extend([n for n in bhist if n not in ahist])
120 alltags[name] = anode, ahist
163 alltags[name] = anode, ahist
121 tagtypes[name] = tagtype
164 tagtypes[name] = tagtype
122
165
166
167 # The tag cache only stores info about heads, not the tag contents
168 # from each head. I.e. it doesn't try to squeeze out the maximum
169 # performance, but is simpler and has a better chance of actually
170 # working correctly. And this gives the biggest performance win: it
171 # avoids looking up .hgtags in the manifest for every head, and it
172 # can avoid calling heads() at all if there have been no changes to
173 # the repo.
174
175 def _readtagcache(ui, repo):
176 '''Read the tag cache and return a tuple (heads, fnodes,
177 shouldwrite). heads is the list of all heads currently in the
178 repository (ordered from tip to oldest) and fnodes is a mapping from
179 head to .hgtags filenode. Caller is responsible for reading tag
180 info from each head.'''
181
182 try:
183 cachefile = repo.opener('tags.cache', 'r')
184 _debug(ui, 'reading tag cache from %s\n' % cachefile.name)
185 except IOError:
186 cachefile = None
187
188 # The cache file consists of lines like
189 # <headrev> <headnode> [<tagnode>]
190 # where <headrev> and <headnode> redundantly identify a repository
191 # head from the time the cache was written, and <tagnode> is the
192 # filenode of .hgtags on that head. Heads with no .hgtags file will
193 # have no <tagnode>. The cache is ordered from tip to oldest (which
194 # is part of why <headrev> is there: a quick visual check is all
195 # that's required to ensure correct order).
196 #
197 # This information is enough to let us avoid the most expensive part
198 # of finding global tags, which is looking up <tagnode> in the
199 # manifest for each head.
200 cacherevs = [] # list of headrev
201 cacheheads = [] # list of headnode
202 cachefnode = {} # map headnode to filenode
203 if cachefile:
204 for line in cachefile:
205 line = line.rstrip().split()
206 cacherevs.append(int(line[0]))
207 headnode = bin(line[1])
208 cacheheads.append(headnode)
209 if len(line) == 3:
210 fnode = bin(line[2])
211 cachefnode[headnode] = fnode
212
213 cachefile.close()
214
215 tipnode = repo.changelog.tip()
216 tiprev = len(repo.changelog) - 1
217
218 # Case 1 (common): tip is the same, so nothing has changed.
219 # (Unchanged tip trivially means no changesets have been added.
220 # But, thanks to localrepository.destroyed(), it also means none
221 # have been destroyed by strip or rollback.)
222 if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
223 _debug(ui, "tag cache: tip unchanged\n")
224 return (cacheheads, cachefnode, False)
225
226 repoheads = repo.heads()
227
228 # Case 2 (uncommon): empty repo; get out quickly and don't bother
229 # writing an empty cache.
230 if repoheads == [nullid]:
231 return ([], {}, False)
232
233 # Case 3 (uncommon): cache file missing or empty.
234 if not cacheheads:
235 _debug(ui, 'tag cache: cache file missing or empty\n')
236
237 # Case 4 (uncommon): tip rev decreased. This should only happen
238 # when we're called from localrepository.destroyed(). Refresh the
239 # cache so future invocations will not see disappeared heads in the
240 # cache.
241 elif cacheheads and tiprev < cacherevs[0]:
242 _debug(ui,
243 'tag cache: tip rev decremented (from %d to %d), '
244 'so we must be destroying nodes\n'
245 % (cacherevs[0], tiprev))
246
247 # Case 5 (common): tip has changed, so we've added/replaced heads.
248 else:
249 _debug(ui,
250 'tag cache: tip has changed (%d:%s); must find new heads\n'
251 % (tiprev, short(tipnode)))
252
253 # Luckily, the code to handle cases 3, 4, 5 is the same. So the
254 # above if/elif/else can disappear once we're confident this thing
255 # actually works and we don't need the debug output.
256
257 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
258 # exposed".
259 newheads = [head
260 for head in repoheads
261 if head not in set(cacheheads)]
262 _debug(ui, 'tag cache: found %d head(s) not in cache: %s\n'
263 % (len(newheads), map(short, newheads)))
264
265 # Now we have to look up the .hgtags filenode for every new head.
266 # This is the most expensive part of finding tags, so performance
267 # depends primarily on the size of newheads. Worst case: no cache
268 # file, so newheads == repoheads.
269 for head in newheads:
270 cctx = repo[head]
271 try:
272 fnode = cctx.filenode('.hgtags')
273 cachefnode[head] = fnode
274 except error.LookupError:
275 # no .hgtags file on this head
276 pass
277
278 # Caller has to iterate over all heads, but can use the filenodes in
279 # cachefnode to get to each .hgtags revision quickly.
280 return (repoheads, cachefnode, True)
281
282 def _writetagcache(ui, repo, heads, tagfnode):
283
284 cachefile = repo.opener('tags.cache', 'w', atomictemp=True)
285 _debug(ui, 'writing cache file %s\n' % cachefile.name)
286
287 realheads = repo.heads() # for sanity checks below
288 for head in heads:
289 # temporary sanity checks; these can probably be removed
290 # once this code has been in crew for a few weeks
291 assert head in repo.changelog.nodemap, \
292 'trying to write non-existent node %s to tag cache' % short(head)
293 assert head in realheads, \
294 'trying to write non-head %s to tag cache' % short(head)
295 assert head != nullid, \
296 'trying to write nullid to tag cache'
297
298 # This can't fail because of the first assert above. When/if we
299 # remove that assert, we might want to catch LookupError here
300 # and downgrade it to a warning.
301 rev = repo.changelog.rev(head)
302
303 fnode = tagfnode.get(head)
304 if fnode:
305 cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
306 else:
307 cachefile.write('%d %s\n' % (rev, hex(head)))
308
309 cachefile.rename()
310 cachefile.close()
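
(To make the cache format concrete, a self-contained sketch of parsing
tags.cache lines as _readtagcache() does above; node values are kept as hex
strings here rather than passed through bin():)

    def readcachelines(fp):
        # one '<headrev> <headnode> [<tagfnode>]' line per head, tip first
        cacherevs, cacheheads, cachefnode = [], [], {}
        for line in fp:
            fields = line.split()
            cacherevs.append(int(fields[0]))
            cacheheads.append(fields[1])
            if len(fields) == 3:
                cachefnode[fields[1]] = fields[2]
        return cacherevs, cacheheads, cachefnode
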
@@ -1,517 +1,526 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 checkundo()
3 checkundo()
4 {
4 {
5 if [ -f .hg/store/undo ]; then
5 if [ -f .hg/store/undo ]; then
6 echo ".hg/store/undo still exists after $1"
6 echo ".hg/store/undo still exists after $1"
7 fi
7 fi
8 }
8 }
9
9
10 echo "[extensions]" >> $HGRCPATH
10 echo "[extensions]" >> $HGRCPATH
11 echo "mq=" >> $HGRCPATH
11 echo "mq=" >> $HGRCPATH
12
12
13 echo % help
13 echo % help
14 hg help mq
14 hg help mq
15
15
16 hg init a
16 hg init a
17 cd a
17 cd a
18 echo a > a
18 echo a > a
19 hg ci -Ama
19 hg ci -Ama
20
20
21 hg clone . ../k
21 hg clone . ../k
22
22
23 mkdir b
23 mkdir b
24 echo z > b/z
24 echo z > b/z
25 hg ci -Ama
25 hg ci -Ama
26
26
27 echo % qinit
27 echo % qinit
28
28
29 hg qinit
29 hg qinit
30
30
31 cd ..
31 cd ..
32 hg init b
32 hg init b
33
33
34 echo % -R qinit
34 echo % -R qinit
35
35
36 hg -R b qinit
36 hg -R b qinit
37
37
38 hg init c
38 hg init c
39
39
40 echo % qinit -c
40 echo % qinit -c
41
41
42 hg --cwd c qinit -c
42 hg --cwd c qinit -c
43 hg -R c/.hg/patches st
43 hg -R c/.hg/patches st
44
44
45 echo '% qinit; qinit -c'
45 echo '% qinit; qinit -c'
46 hg init d
46 hg init d
47 cd d
47 cd d
48 hg qinit
48 hg qinit
49 hg qinit -c
49 hg qinit -c
50 # qinit -c should create both files if they don't exist
50 # qinit -c should create both files if they don't exist
51 echo ' .hgignore:'
51 echo ' .hgignore:'
52 cat .hg/patches/.hgignore
52 cat .hg/patches/.hgignore
53 echo ' series:'
53 echo ' series:'
54 cat .hg/patches/series
54 cat .hg/patches/series
55 hg qinit -c 2>&1 | sed -e 's/repository.*already/repository already/'
55 hg qinit -c 2>&1 | sed -e 's/repository.*already/repository already/'
56 cd ..
56 cd ..
57
57
58 echo '% qinit; <stuff>; qinit -c'
58 echo '% qinit; <stuff>; qinit -c'
59 hg init e
59 hg init e
60 cd e
60 cd e
61 hg qnew A
61 hg qnew A
62 checkundo qnew
62 checkundo qnew
63 echo foo > foo
63 echo foo > foo
64 hg add foo
64 hg add foo
65 hg qrefresh
65 hg qrefresh
66 hg qnew B
66 hg qnew B
67 echo >> foo
67 echo >> foo
68 hg qrefresh
68 hg qrefresh
69 echo status >> .hg/patches/.hgignore
69 echo status >> .hg/patches/.hgignore
70 echo bleh >> .hg/patches/.hgignore
70 echo bleh >> .hg/patches/.hgignore
71 hg qinit -c
71 hg qinit -c
72 hg -R .hg/patches status
72 hg -R .hg/patches status
73 # qinit -c shouldn't touch these files if they already exist
73 # qinit -c shouldn't touch these files if they already exist
74 echo ' .hgignore:'
74 echo ' .hgignore:'
75 cat .hg/patches/.hgignore
75 cat .hg/patches/.hgignore
76 echo ' series:'
76 echo ' series:'
77 cat .hg/patches/series
77 cat .hg/patches/series
78 cd ..
78 cd ..
79
79
80 cd a
80 cd a
81
81
82 hg qnew -m 'foo bar' test.patch
82 hg qnew -m 'foo bar' test.patch
83
83
84 echo % qrefresh
84 echo % qrefresh
85
85
86 echo a >> a
86 echo a >> a
87 hg qrefresh
87 hg qrefresh
88 sed -e "s/^\(diff -r \)\([a-f0-9]* \)/\1 x/" \
88 sed -e "s/^\(diff -r \)\([a-f0-9]* \)/\1 x/" \
89 -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
89 -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
90 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/test.patch
90 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/test.patch
91
91
92 echo % empty qrefresh
92 echo % empty qrefresh
93
93
94 hg qrefresh -X a
94 hg qrefresh -X a
95 echo 'revision:'
95 echo 'revision:'
96 hg diff -r -2 -r -1
96 hg diff -r -2 -r -1
97 echo 'patch:'
97 echo 'patch:'
98 cat .hg/patches/test.patch
98 cat .hg/patches/test.patch
99 echo 'working dir diff:'
99 echo 'working dir diff:'
100 hg diff --nodates -q
100 hg diff --nodates -q
101 # restore things
101 # restore things
102 hg qrefresh
102 hg qrefresh
103 checkundo qrefresh
103 checkundo qrefresh
104
104
105 echo % qpop
105 echo % qpop
106
106
107 hg qpop
107 hg qpop
108 checkundo qpop
108 checkundo qpop
109
109
110 echo % qpush
110 echo % qpush with dump of tag cache
111
111
112 # Dump the tag cache to ensure that it has exactly one head after qpush.
113 rm -f .hg/tags.cache
114 hg tags > /dev/null
115 echo ".hg/tags.cache (pre qpush):"
116 sed 's/ [0-9a-f]*//' .hg/tags.cache
112 hg qpush
117 hg qpush
118 hg tags > /dev/null
119 echo ".hg/tags.cache (post qpush):"
120 sed 's/ [0-9a-f]*//' .hg/tags.cache
121
113 checkundo qpush
122 checkundo qpush
114
123
115 cd ..
124 cd ..
116
125
117 echo % pop/push outside repo
126 echo % pop/push outside repo
118
127
119 hg -R a qpop
128 hg -R a qpop
120 hg -R a qpush
129 hg -R a qpush
121
130
122 cd a
131 cd a
123 hg qnew test2.patch
132 hg qnew test2.patch
124
133
125 echo % qrefresh in subdir
134 echo % qrefresh in subdir
126
135
127 cd b
136 cd b
128 echo a > a
137 echo a > a
129 hg add a
138 hg add a
130 hg qrefresh
139 hg qrefresh
131
140
132 echo % pop/push -a in subdir
141 echo % pop/push -a in subdir
133
142
134 hg qpop -a
143 hg qpop -a
135 hg --traceback qpush -a
144 hg --traceback qpush -a
136
145
137 echo % qseries
146 echo % qseries
138 hg qseries
147 hg qseries
139 hg qpop
148 hg qpop
140 hg qseries -vs
149 hg qseries -vs
141 hg qpush
150 hg qpush
142
151
143 echo % qapplied
152 echo % qapplied
144 hg qapplied
153 hg qapplied
145
154
146 echo % qtop
155 echo % qtop
147 hg qtop
156 hg qtop
148
157
149 echo % qprev
158 echo % qprev
150 hg qprev
159 hg qprev
151
160
152 echo % qnext
161 echo % qnext
153 hg qnext
162 hg qnext
154
163
155 echo % pop, qnext, qprev, qapplied
164 echo % pop, qnext, qprev, qapplied
156 hg qpop
165 hg qpop
157 hg qnext
166 hg qnext
158 hg qprev
167 hg qprev
159 hg qapplied
168 hg qapplied
160
169
161 echo % commit should fail
170 echo % commit should fail
162 hg commit
171 hg commit
163
172
164 echo % push should fail
173 echo % push should fail
165 hg push ../../k
174 hg push ../../k
166
175
167 echo % import should fail
176 echo % import should fail
168 hg st .
177 hg st .
169 echo foo >> ../a
178 echo foo >> ../a
170 hg diff > ../../import.diff
179 hg diff > ../../import.diff
171 hg revert --no-backup ../a
180 hg revert --no-backup ../a
172 hg import ../../import.diff
181 hg import ../../import.diff
173 hg st
182 hg st
174
183
175 echo % qunapplied
184 echo % qunapplied
176 hg qunapplied
185 hg qunapplied
177
186
178 echo % qpush/qpop with index
187 echo % qpush/qpop with index
179 hg qnew test1b.patch
188 hg qnew test1b.patch
180 echo 1b > 1b
189 echo 1b > 1b
181 hg add 1b
190 hg add 1b
182 hg qrefresh
191 hg qrefresh
183 hg qpush 2
192 hg qpush 2
184 hg qpop 0
193 hg qpop 0
185 hg qpush test.patch+1
194 hg qpush test.patch+1
186 hg qpush test.patch+2
195 hg qpush test.patch+2
187 hg qpop test2.patch-1
196 hg qpop test2.patch-1
188 hg qpop test2.patch-2
197 hg qpop test2.patch-2
189 hg qpush test1b.patch+1
198 hg qpush test1b.patch+1
190
199
191 echo % push should succeed
200 echo % push should succeed
192 hg qpop -a
201 hg qpop -a
193 hg push ../../k
202 hg push ../../k
194
203
195 echo % qpush/qpop error codes
204 echo % qpush/qpop error codes
196 errorcode()
205 errorcode()
197 {
206 {
198 hg "$@" && echo " $@ succeeds" || echo " $@ fails"
207 hg "$@" && echo " $@ succeeds" || echo " $@ fails"
199 }
208 }
200
209
201 # we want to start with some patches applied
210 # we want to start with some patches applied
202 hg qpush -a
211 hg qpush -a
203 echo " % pops all patches and succeeds"
212 echo " % pops all patches and succeeds"
204 errorcode qpop -a
213 errorcode qpop -a
205 echo " % does nothing and succeeds"
214 echo " % does nothing and succeeds"
206 errorcode qpop -a
215 errorcode qpop -a
207 echo " % fails - nothing else to pop"
216 echo " % fails - nothing else to pop"
208 errorcode qpop
217 errorcode qpop
209 echo " % pushes a patch and succeeds"
218 echo " % pushes a patch and succeeds"
210 errorcode qpush
219 errorcode qpush
211 echo " % pops a patch and succeeds"
220 echo " % pops a patch and succeeds"
212 errorcode qpop
221 errorcode qpop
213 echo " % pushes up to test1b.patch and succeeds"
222 echo " % pushes up to test1b.patch and succeeds"
214 errorcode qpush test1b.patch
223 errorcode qpush test1b.patch
215 echo " % does nothing and succeeds"
224 echo " % does nothing and succeeds"
216 errorcode qpush test1b.patch
225 errorcode qpush test1b.patch
217 echo " % does nothing and succeeds"
226 echo " % does nothing and succeeds"
218 errorcode qpop test1b.patch
227 errorcode qpop test1b.patch
219 echo " % fails - can't push to this patch"
228 echo " % fails - can't push to this patch"
220 errorcode qpush test.patch
229 errorcode qpush test.patch
221 echo " % fails - can't pop to this patch"
230 echo " % fails - can't pop to this patch"
222 errorcode qpop test2.patch
231 errorcode qpop test2.patch
223 echo " % pops up to test.patch and succeeds"
232 echo " % pops up to test.patch and succeeds"
224 errorcode qpop test.patch
233 errorcode qpop test.patch
225 echo " % pushes all patches and succeeds"
234 echo " % pushes all patches and succeeds"
226 errorcode qpush -a
235 errorcode qpush -a
227 echo " % does nothing and succeeds"
236 echo " % does nothing and succeeds"
228 errorcode qpush -a
237 errorcode qpush -a
229 echo " % fails - nothing else to push"
238 echo " % fails - nothing else to push"
230 errorcode qpush
239 errorcode qpush
231 echo " % does nothing and succeeds"
240 echo " % does nothing and succeeds"
232 errorcode qpush test2.patch
241 errorcode qpush test2.patch
233
242
234
243
235 echo % strip
244 echo % strip
236 cd ../../b
245 cd ../../b
237 echo x>x
246 echo x>x
238 hg ci -Ama
247 hg ci -Ama
239 hg strip tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
248 hg strip tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
240 hg unbundle .hg/strip-backup/*
249 hg unbundle .hg/strip-backup/*
241
250
242 echo % strip with local changes, should complain
251 echo % strip with local changes, should complain
243 hg up
252 hg up
244 echo y>y
253 echo y>y
245 hg add y
254 hg add y
246 hg strip tip | sed 's/\(saving bundle to \).*/\1/'
255 hg strip tip | sed 's/\(saving bundle to \).*/\1/'
247 echo % --force strip with local changes
256 echo % --force strip with local changes
248 hg strip -f tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
257 hg strip -f tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
249
258
250 echo '% cd b; hg qrefresh'
259 echo '% cd b; hg qrefresh'
251 hg init refresh
260 hg init refresh
252 cd refresh
261 cd refresh
253 echo a > a
262 echo a > a
254 hg ci -Ama
263 hg ci -Ama
255 hg qnew -mfoo foo
264 hg qnew -mfoo foo
256 echo a >> a
265 echo a >> a
257 hg qrefresh
266 hg qrefresh
258 mkdir b
267 mkdir b
259 cd b
268 cd b
260 echo f > f
269 echo f > f
261 hg add f
270 hg add f
262 hg qrefresh
271 hg qrefresh
263 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
272 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
264 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
273 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
265 echo % hg qrefresh .
274 echo % hg qrefresh .
266 hg qrefresh .
275 hg qrefresh .
267 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
276 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
268 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
277 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
269 hg status
278 hg status
270
279
271 echo % qpush failure
280 echo % qpush failure
272 cd ..
281 cd ..
273 hg qrefresh
282 hg qrefresh
274 hg qnew -mbar bar
283 hg qnew -mbar bar
275 echo foo > foo
284 echo foo > foo
276 echo bar > bar
285 echo bar > bar
277 hg add foo bar
286 hg add foo bar
278 hg qrefresh
287 hg qrefresh
279 hg qpop -a
288 hg qpop -a
280 echo bar > foo
289 echo bar > foo
281 hg qpush -a
290 hg qpush -a
282 hg st
291 hg st
283
292
284 echo % mq tags
293 echo % mq tags
285 hg log --template '{rev} {tags}\n' -r qparent:qtip
294 hg log --template '{rev} {tags}\n' -r qparent:qtip
286
295
287 echo % bad node in status
296 echo % bad node in status
288 hg qpop
297 hg qpop
289 hg strip -qn tip
298 hg strip -qn tip
290 hg tip 2>&1 | sed -e 's/unknown node .*/unknown node/'
299 hg tip 2>&1 | sed -e 's/unknown node .*/unknown node/'
291 hg branches 2>&1 | sed -e 's/unknown node .*/unknown node/'
300 hg branches 2>&1 | sed -e 's/unknown node .*/unknown node/'
292 hg qpop 2>&1 | sed -e 's/unknown node .*/unknown node/'
301 hg qpop 2>&1 | sed -e 's/unknown node .*/unknown node/'
293
302
294 cat >>$HGRCPATH <<EOF
303 cat >>$HGRCPATH <<EOF
295 [diff]
304 [diff]
296 git = True
305 git = True
297 EOF
306 EOF
298 cd ..
307 cd ..
299 hg init git
308 hg init git
300 cd git
309 cd git
301 hg qinit
310 hg qinit
302
311
303 hg qnew -m'new file' new
312 hg qnew -m'new file' new
304 echo foo > new
313 echo foo > new
305 chmod +x new
314 chmod +x new
306 hg add new
315 hg add new
307 hg qrefresh
316 hg qrefresh
308 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
317 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
309 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/new
318 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/new
310
319
311 hg qnew -m'copy file' copy
320 hg qnew -m'copy file' copy
312 hg cp new copy
321 hg cp new copy
313 hg qrefresh
322 hg qrefresh
314 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
323 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
315 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/copy
324 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/copy
316
325
317 hg qpop
326 hg qpop
318 hg qpush
327 hg qpush
319 hg qdiff
328 hg qdiff
320 cat >>$HGRCPATH <<EOF
329 cat >>$HGRCPATH <<EOF
321 [diff]
330 [diff]
322 git = False
331 git = False
323 EOF
332 EOF
324 hg qdiff --git
333 hg qdiff --git
325
334
326 cd ..
335 cd ..
327 hg init slow
336 hg init slow
328 cd slow
337 cd slow
329 hg qinit
338 hg qinit
330 echo foo > foo
339 echo foo > foo
331 hg add foo
340 hg add foo
332 hg ci -m 'add foo'
341 hg ci -m 'add foo'
333 hg qnew bar
342 hg qnew bar
334 echo bar > bar
343 echo bar > bar
335 hg add bar
344 hg add bar
336 hg mv foo baz
345 hg mv foo baz
337 hg qrefresh --git
346 hg qrefresh --git
338 hg up -C 0
347 hg up -C 0
339 echo >> foo
348 echo >> foo
340 hg ci -m 'change foo'
349 hg ci -m 'change foo'
341 hg up -C 1
350 hg up -C 1
342 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
351 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
343 cat .hg/patches/bar
352 cat .hg/patches/bar
344 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
353 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
345 hg qrefresh --git
354 hg qrefresh --git
346 cat .hg/patches/bar
355 cat .hg/patches/bar
347 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
356 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
348 hg qrefresh
357 hg qrefresh
349 grep 'diff --git' .hg/patches/bar
358 grep 'diff --git' .hg/patches/bar
350
359
351 echo
360 echo
352 hg up -C 1
361 hg up -C 1
353 echo >> foo
362 echo >> foo
354 hg ci -m 'change foo again'
363 hg ci -m 'change foo again'
355 hg up -C 2
364 hg up -C 2
356 hg mv bar quux
365 hg mv bar quux
357 hg mv baz bleh
366 hg mv baz bleh
358 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
367 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
359 cat .hg/patches/bar
368 cat .hg/patches/bar
360 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
369 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
361 hg mv quux fred
370 hg mv quux fred
362 hg mv bleh barney
371 hg mv bleh barney
363 hg qrefresh --git
372 hg qrefresh --git
364 cat .hg/patches/bar
373 cat .hg/patches/bar
365 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
374 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
366
375
367 echo % refresh omitting an added file
376 echo % refresh omitting an added file
368 hg qnew baz
377 hg qnew baz
369 echo newfile > newfile
378 echo newfile > newfile
370 hg add newfile
379 hg add newfile
371 hg qrefresh
380 hg qrefresh
372 hg st -A newfile
381 hg st -A newfile
373 hg qrefresh -X newfile
382 hg qrefresh -X newfile
374 hg st -A newfile
383 hg st -A newfile
375 hg revert newfile
384 hg revert newfile
376 rm newfile
385 rm newfile
377 hg qpop
386 hg qpop
378 hg qdel baz
387 hg qdel baz
379
388
380 echo % create a git patch
389 echo % create a git patch
381 echo a > alexander
390 echo a > alexander
382 hg add alexander
391 hg add alexander
383 hg qnew -f --git addalexander
392 hg qnew -f --git addalexander
384 grep diff .hg/patches/addalexander
393 grep diff .hg/patches/addalexander
385
394
386 echo % create a git binary patch
395 echo % create a git binary patch
387 cat > writebin.py <<EOF
396 cat > writebin.py <<EOF
388 import sys
397 import sys
389 path = sys.argv[1]
398 path = sys.argv[1]
390 open(path, 'wb').write('BIN\x00ARY')
399 open(path, 'wb').write('BIN\x00ARY')
391 EOF
400 EOF
392 python writebin.py bucephalus
401 python writebin.py bucephalus
393
402
394 python "$TESTDIR/md5sum.py" bucephalus
403 python "$TESTDIR/md5sum.py" bucephalus
395 hg add bucephalus
404 hg add bucephalus
396 hg qnew -f --git addbucephalus
405 hg qnew -f --git addbucephalus
397 grep diff .hg/patches/addbucephalus
406 grep diff .hg/patches/addbucephalus
398
407
399 echo % check binary patches can be popped and pushed
408 echo % check binary patches can be popped and pushed
400 hg qpop
409 hg qpop
401 test -f bucephalus && echo % bucephalus should not be there
410 test -f bucephalus && echo % bucephalus should not be there
402 hg qpush
411 hg qpush
403 test -f bucephalus || echo % bucephalus should be there
412 test -f bucephalus || echo % bucephalus should be there
404 python "$TESTDIR/md5sum.py" bucephalus
413 python "$TESTDIR/md5sum.py" bucephalus
405
414
406
415
407 echo '% strip again'
416 echo '% strip again'
408 cd ..
417 cd ..
409 hg init strip
418 hg init strip
410 cd strip
419 cd strip
411 touch foo
420 touch foo
412 hg add foo
421 hg add foo
413 hg ci -m 'add foo'
422 hg ci -m 'add foo'
414 echo >> foo
423 echo >> foo
415 hg ci -m 'change foo 1'
424 hg ci -m 'change foo 1'
416 hg up -C 0
425 hg up -C 0
417 echo 1 >> foo
426 echo 1 >> foo
418 hg ci -m 'change foo 2'
427 hg ci -m 'change foo 2'
419 HGMERGE=true hg merge
428 HGMERGE=true hg merge
420 hg ci -m merge
429 hg ci -m merge
421 hg log
430 hg log
422 hg strip 1 2>&1 | sed 's/\(saving bundle to \).*/\1/'
431 hg strip 1 2>&1 | sed 's/\(saving bundle to \).*/\1/'
423 checkundo strip
432 checkundo strip
424 hg log
433 hg log
425 cd ..
434 cd ..
426
435
427 echo '% qclone'
436 echo '% qclone'
428 qlog()
437 qlog()
429 {
438 {
430 echo 'main repo:'
439 echo 'main repo:'
431 hg log --template ' rev {rev}: {desc}\n'
440 hg log --template ' rev {rev}: {desc}\n'
432 echo 'patch repo:'
441 echo 'patch repo:'
433 hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
442 hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
434 }
443 }
435 hg init qclonesource
444 hg init qclonesource
436 cd qclonesource
445 cd qclonesource
437 echo foo > foo
446 echo foo > foo
438 hg add foo
447 hg add foo
439 hg ci -m 'add foo'
448 hg ci -m 'add foo'
440 hg qinit
449 hg qinit
441 hg qnew patch1
450 hg qnew patch1
442 echo bar >> foo
451 echo bar >> foo
443 hg qrefresh -m 'change foo'
452 hg qrefresh -m 'change foo'
444 cd ..
453 cd ..
445
454
446 # repo with unversioned patch dir
455 # repo with unversioned patch dir
447 hg qclone qclonesource failure
456 hg qclone qclonesource failure
448
457
cd qclonesource
hg qinit -c
hg qci -m checkpoint
qlog
cd ..

# repo with patches applied
hg qclone qclonesource qclonedest
cd qclonedest
qlog
cd ..

# repo with patches unapplied
cd qclonesource
hg qpop -a
qlog
cd ..
hg qclone qclonesource qclonedest2
cd qclonedest2
qlog
cd ..

echo % 'test applying on an empty file (issue 1033)'
hg init empty
cd empty
touch a
hg ci -Am addempty
echo a > a
hg qnew -f -e changea
hg qpop
hg qpush
cd ..

echo % test qpush with --force, issue1087
hg init forcepush
cd forcepush
echo hello > hello.txt
echo bye > bye.txt
hg ci -Ama
hg qnew -d '0 0' empty
hg qpop
echo world >> hello.txt

echo % qpush should fail, local changes
hg qpush

echo % apply force, should not discard changes with empty patch
hg qpush -f 2>&1 | sed 's,^.*/patch,patch,g'
hg diff --config diff.nodates=True
hg qdiff --config diff.nodates=True
hg log -l1 -p
hg qref -d '0 0'
hg qpop
echo universe >> hello.txt
echo universe >> bye.txt

echo % qpush should fail, local changes
hg qpush

echo % apply force, should discard changes in hello, but not bye
hg qpush -f
hg st
hg diff --config diff.nodates=True
hg qdiff --config diff.nodates=True

echo % test popping revisions not in working dir ancestry
hg qseries -v
hg up qparent
hg qpop
@@ -1,573 +1,577 @@
% help
mq extension - manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and applied
patches (subset of known patches).

Known patches are represented as patch files in the .hg/patches directory.
Applied patches are both patch files and changesets.

Common tasks (use "hg help command" for more details):

prepare repository to work with patches qinit
create new patch qnew
import existing patch qimport

print patch series qseries
print applied patches qapplied
print name of top applied patch qtop

add known patch to applied stack qpush
remove patch from applied stack qpop
refresh contents of top applied patch qrefresh

list of commands:

qapplied print the patches already applied
qclone clone main and patch repository at same time
qcommit commit changes in the queue repository
qdelete remove patches from queue
qdiff diff of the current patch and subsequent modifications
qfinish move applied patches into repository history
qfold fold the named patches into the current patch
qgoto push or pop patches until named patch is at top of stack
qguard set or print guards for a patch
qheader print the header of the topmost or specified patch
qimport import a patch
qinit init a new queue repository
qnew create a new patch
qnext print the name of the next patch
qpop pop the current patch off the stack
qprev print the name of the previous patch
qpush push the next patch onto the stack
qrefresh update the current patch
qrename rename a patch
qrestore restore the queue state saved by a revision
qsave save current queue state
qselect set or print guarded patches to push
qseries print the entire series file
qtop print the name of the current patch
qunapplied print the patches not yet applied
strip strip a revision and all its descendants from the repository

enabled extensions:

mq manage a stack of patches

use "hg -v help mq" to show aliases and global options
adding a
updating working directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding b/z
% qinit
% -R qinit
% qinit -c
A .hgignore
A series
% qinit; qinit -c
.hgignore:
^\.hg
^\.mq
syntax: glob
status
guards
series:
abort: repository already exists!
% qinit; <stuff>; qinit -c
adding .hg/patches/A
adding .hg/patches/B
A .hgignore
A A
A B
A series
.hgignore:
status
bleh
series:
A
B
% qrefresh
foo bar

diff -r xa
--- a/a
+++ b/a
@@ -1,1 +1,2 @@
a
+a
% empty qrefresh
revision:
patch:
foo bar

working dir diff:
--- a/a
+++ b/a
@@ -1,1 +1,2 @@
a
+a
% qpop
popping test.patch
patch queue now empty
% qpush with dump of tag cache
.hg/tags.cache (pre qpush):
1
applying test.patch
now at: test.patch
.hg/tags.cache (post qpush):
2
% pop/push outside repo
popping test.patch
patch queue now empty
applying test.patch
now at: test.patch
% qrefresh in subdir
% pop/push -a in subdir
popping test2.patch
popping test.patch
patch queue now empty
applying test.patch
applying test2.patch
now at: test2.patch
% qseries
test.patch
test2.patch
popping test2.patch
now at: test.patch
0 A test.patch: foo bar
1 U test2.patch:
applying test2.patch
now at: test2.patch
% qapplied
test.patch
test2.patch
% qtop
test2.patch
% qprev
test.patch
% qnext
all patches applied
% pop, qnext, qprev, qapplied
popping test2.patch
now at: test.patch
test2.patch
only one patch applied
test.patch
% commit should fail
abort: cannot commit over an applied mq patch
% push should fail
pushing to ../../k
abort: source has mq patches applied
% import should fail
abort: cannot import over an applied patch
% qunapplied
test2.patch
% qpush/qpop with index
applying test2.patch
now at: test2.patch
popping test2.patch
popping test1b.patch
now at: test.patch
applying test1b.patch
now at: test1b.patch
applying test2.patch
now at: test2.patch
popping test2.patch
now at: test1b.patch
popping test1b.patch
now at: test.patch
applying test1b.patch
applying test2.patch
now at: test2.patch
% push should succeed
popping test2.patch
popping test1b.patch
popping test.patch
patch queue now empty
pushing to ../../k
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
% qpush/qpop error codes
applying test.patch
applying test1b.patch
applying test2.patch
now at: test2.patch
% pops all patches and succeeds
popping test2.patch
popping test1b.patch
popping test.patch
patch queue now empty
qpop -a succeeds
% does nothing and succeeds
no patches applied
qpop -a succeeds
% fails - nothing else to pop
no patches applied
qpop fails
% pushes a patch and succeeds
applying test.patch
now at: test.patch
qpush succeeds
% pops a patch and succeeds
popping test.patch
patch queue now empty
qpop succeeds
% pushes up to test1b.patch and succeeds
applying test.patch
applying test1b.patch
now at: test1b.patch
qpush test1b.patch succeeds
% does nothing and succeeds
qpush: test1b.patch is already at the top
qpush test1b.patch succeeds
% does nothing and succeeds
qpop: test1b.patch is already at the top
qpop test1b.patch succeeds
% fails - can't push to this patch
abort: cannot push to a previous patch: test.patch
qpush test.patch fails
% fails - can't pop to this patch
abort: patch test2.patch is not applied
qpop test2.patch fails
% pops up to test.patch and succeeds
popping test1b.patch
now at: test.patch
qpop test.patch succeeds
% pushes all patches and succeeds
applying test1b.patch
applying test2.patch
now at: test2.patch
qpush -a succeeds
% does nothing and succeeds
all patches are currently applied
qpush -a succeeds
% fails - nothing else to push
patch series already fully applied
qpush fails
% does nothing and succeeds
qpush: test2.patch is already at the top
qpush test2.patch succeeds
% strip
adding x
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
saving bundle to
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
(run 'hg update' to get a working copy)
% strip with local changes, should complain
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
abort: local changes found
% --force strip with local changes
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
saving bundle to
% cd b; hg qrefresh
adding a
foo

diff -r cb9a9f314b8b a
--- a/a
+++ b/a
@@ -1,1 +1,2 @@
a
+a
diff -r cb9a9f314b8b b/f
--- /dev/null
+++ b/b/f
@@ -0,0 +1,1 @@
+f
% hg qrefresh .
foo

diff -r cb9a9f314b8b b/f
--- /dev/null
+++ b/b/f
@@ -0,0 +1,1 @@
+f
M a
% qpush failure
popping bar
popping foo
patch queue now empty
applying foo
applying bar
file foo already exists
1 out of 1 hunks FAILED -- saving rejects to file foo.rej
patch failed, unable to continue (try -v)
patch failed, rejects left in working dir
errors during apply, please fix and refresh bar
? foo
? foo.rej
% mq tags
0 qparent
1 qbase foo
2 qtip bar tip
% bad node in status
popping bar
now at: foo
changeset: 0:cb9a9f314b8b
mq status file refers to unknown node
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: a

mq status file refers to unknown node
default 0:cb9a9f314b8b
abort: trying to pop unknown node
new file

diff --git a/new b/new
new file mode 100755
--- /dev/null
+++ b/new
@@ -0,0 +1,1 @@
+foo
copy file

diff --git a/new b/copy
copy from new
copy to copy
popping copy
now at: new
applying copy
now at: copy
diff --git a/new b/copy
copy from new
copy to copy
diff --git a/new b/copy
copy from new
copy to copy
1 files updated, 0 files merged, 2 files removed, 0 files unresolved
created new head
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
popping bar
adding branch
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
patch queue now empty
(working directory not at a head)
applying bar
now at: bar
diff --git a/bar b/bar
new file mode 100644
--- /dev/null
+++ b/bar
@@ -0,0 +1,1 @@
+bar
diff --git a/foo b/baz
rename from foo
rename to baz
2 baz (foo)
diff --git a/bar b/bar
new file mode 100644
--- /dev/null
+++ b/bar
@@ -0,0 +1,1 @@
+bar
diff --git a/foo b/baz
rename from foo
rename to baz
2 baz (foo)
diff --git a/bar b/bar
diff --git a/foo b/baz

1 files updated, 0 files merged, 2 files removed, 0 files unresolved
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
popping bar
adding branch
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
patch queue now empty
(working directory not at a head)
applying bar
now at: bar
diff --git a/foo b/bleh
rename from foo
rename to bleh
diff --git a/quux b/quux
new file mode 100644
--- /dev/null
+++ b/quux
@@ -0,0 +1,1 @@
+bar
3 bleh (foo)
diff --git a/foo b/barney
rename from foo
rename to barney
diff --git a/fred b/fred
new file mode 100644
--- /dev/null
+++ b/fred
@@ -0,0 +1,1 @@
+bar
3 barney (foo)
% refresh omitting an added file
C newfile
A newfile
popping baz
now at: bar
% create a git patch
diff --git a/alexander b/alexander
% create a git binary patch
8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
diff --git a/bucephalus b/bucephalus
% check binary patches can be popped and pushed
popping addbucephalus
now at: addalexander
applying addbucephalus
now at: addbucephalus
8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
% strip again
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
created new head
merging foo
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
changeset: 3:99615015637b
tag: tip
parent: 2:20cbbe65cff7
parent: 1:d2871fc282d4
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: merge

changeset: 2:20cbbe65cff7
parent: 0:53245c60e682
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: change foo 2

changeset: 1:d2871fc282d4
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: change foo 1

changeset: 0:53245c60e682
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add foo

1 files updated, 0 files merged, 0 files removed, 0 files unresolved
saving bundle to
saving bundle to
adding branch
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
changeset: 1:20cbbe65cff7
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: change foo 2

changeset: 0:53245c60e682
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add foo

% qclone
abort: versioned patch repository not found (see qinit -c)
adding .hg/patches/patch1
main repo:
rev 1: change foo
rev 0: add foo
patch repo:
rev 0: checkpoint
updating working directory
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
main repo:
rev 0: add foo
patch repo:
rev 0: checkpoint
popping patch1
patch queue now empty
main repo:
rev 0: add foo
patch repo:
rev 0: checkpoint
updating working directory
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
main repo:
rev 0: add foo
patch repo:
rev 0: checkpoint
% test applying on an empty file (issue 1033)
adding a
popping changea
patch queue now empty
applying changea
now at: changea
% test qpush with --force, issue1087
adding bye.txt
adding hello.txt
popping empty
patch queue now empty
% qpush should fail, local changes
abort: local changes found, refresh first
% apply force, should not discard changes with empty patch
applying empty
patch empty is empty
now at: empty
diff -r bf5fc3f07a0a hello.txt
--- a/hello.txt
+++ b/hello.txt
@@ -1,1 +1,2 @@
hello
+world
diff -r 9ecee4f634e3 hello.txt
--- a/hello.txt
+++ b/hello.txt
@@ -1,1 +1,2 @@
hello
+world
changeset: 1:bf5fc3f07a0a
tag: qtip
tag: tip
tag: empty
tag: qbase
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: imported patch empty


popping empty
patch queue now empty
% qpush should fail, local changes
abort: local changes found, refresh first
% apply force, should discard changes in hello, but not bye
applying empty
now at: empty
M bye.txt
diff -r ba252371dbc1 bye.txt
--- a/bye.txt
+++ b/bye.txt
@@ -1,1 +1,2 @@
bye
+universe
diff -r 9ecee4f634e3 bye.txt
--- a/bye.txt
+++ b/bye.txt
@@ -1,1 +1,2 @@
bye
+universe
diff -r 9ecee4f634e3 hello.txt
--- a/hello.txt
+++ b/hello.txt
@@ -1,1 +1,3 @@
hello
+world
+universe
% test popping revisions not in working dir ancestry
0 A empty
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
popping empty
patch queue now empty
@@ -1,191 +1,202 @@
#!/bin/sh

cacheexists() {
    [ -f .hg/tags.cache ] && echo "tag cache exists" || echo "no tag cache"
}
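# cacheexists (above) is a helper for the persistent tag cache introduced
# by this change: it reports whether .hg/tags.cache is present, letting the
# assertions below pin down exactly which commands create the cache file.
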
echo "% setup"
mkdir t
cd t
hg init
cacheexists
hg id
cacheexists
echo a > a
hg add a
hg commit -m "test"
hg co
hg identify
cacheexists

echo "% create local tag with long name"
T=`hg identify --debug --id`
hg tag -l "This is a local tag with a really long name!"
hg tags
rm .hg/localtags

echo "% create a tag behind hg's back"
echo "$T first" > .hgtags
cat .hgtags
hg add .hgtags
hg commit -m "add tags"
hg tags
hg identify

# repeat with cold tag cache
rm -f .hg/tags.cache
hg identify
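# the rerun above exercises the cold-cache path: with .hg/tags.cache
# deleted, hg must recompute tag info from .hgtags and is expected to
# print the same identity as the warm-cache run just before it.
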
echo "% create a branch"
echo bb > a
hg status
hg identify
hg co first
hg id
hg -v id
hg status
echo 1 > b
hg add b
hg commit -m "branch"
hg id

echo "% merge the two heads"
hg merge 1
hg id
hg status

hg commit -m "merge"

echo "% create fake head, make sure tag not visible afterwards"
cp .hgtags tags
hg tag last
hg rm .hgtags
hg commit -m "remove"

mv tags .hgtags
hg add .hgtags
hg commit -m "readd"

hg tags

echo "% add invalid tags"
echo "spam" >> .hgtags
echo >> .hgtags
echo "foo bar" >> .hgtags
echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
echo "committing .hgtags:"
cat .hgtags
hg commit -m "tags"
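# the entries just committed are deliberately malformed ("spam" lacks a
# node, "foo bar" has a bogus node); tag parsing should warn about such
# lines rather than abort.
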
echo "% report tag parse error on other head"
hg up 3
echo 'x y' >> .hgtags
hg commit -m "head"

hg tags
hg tip

echo "% test tag precedence rules"
cd ..
hg init t2
cd t2
echo foo > foo
hg add foo
hg ci -m 'add foo' # rev 0
hg tag bar # rev 1
echo >> foo
hg ci -m 'change foo 1' # rev 2
hg up -C 1
hg tag -r 1 -f bar # rev 3
hg up -C 1
echo >> foo
hg ci -m 'change foo 2' # rev 4
hg tags
hg tags # repeat in case of cache effects
96 rev=$1
107 rev=$1
97 echo "rev $rev: .hgtags:"
108 echo "rev $rev: .hgtags:"
98 hg cat -r$rev .hgtags
109 hg cat -r$rev .hgtags
99 }
110 }
100
111
101 echo "% detailed dump of tag info"
112 echo "% detailed dump of tag info"
102 echo "heads:"
113 echo "heads:"
103 hg heads -q # expect 4, 3, 2
114 hg heads -q # expect 4, 3, 2
104 dumptags 2
115 dumptags 2
105 dumptags 3
116 dumptags 3
106 dumptags 4
117 dumptags 4
107 echo ".hg/tags.cache:"
118 echo ".hg/tags.cache:"
108 [ -f .hg/tags.cache ] && cat .hg/tags.cache || echo "no such file"
119 [ -f .hg/tags.cache ] && cat .hg/tags.cache || echo "no such file"
109
120
110 echo "% test tag removal"
121 echo "% test tag removal"
111 hg tag --remove bar # rev 5
122 hg tag --remove bar # rev 5
112 hg tip -vp
123 hg tip -vp
113 hg tags
124 hg tags
114 hg tags # again, try to expose cache bugs
125 hg tags # again, try to expose cache bugs
115
126
116 echo '% remove nonexistent tag'
127 echo '% remove nonexistent tag'
117 hg tag --remove foobar
128 hg tag --remove foobar
118 hg tip
129 hg tip
119
130
120 echo "% rollback undoes tag operation"
131 echo "% rollback undoes tag operation"
121 hg rollback # destroy rev 5 (restore bar)
132 hg rollback # destroy rev 5 (restore bar)
122 hg tags
133 hg tags
123 hg tags
134 hg tags
124
135
125 echo "% test tag rank"
136 echo "% test tag rank"
126 cd ..
137 cd ..
127 hg init t3
138 hg init t3
128 cd t3
139 cd t3
129 echo foo > foo
140 echo foo > foo
130 hg add foo
141 hg add foo
131 hg ci -m 'add foo' # rev 0
142 hg ci -m 'add foo' # rev 0
132 hg tag -f bar # rev 1 bar -> 0
143 hg tag -f bar # rev 1 bar -> 0
133 hg tag -f bar # rev 2 bar -> 1
144 hg tag -f bar # rev 2 bar -> 1
134 hg tag -fr 0 bar # rev 3 bar -> 0
145 hg tag -fr 0 bar # rev 3 bar -> 0
135 hg tag -fr 1 bar # rev 4 bar -> 1
146 hg tag -fr 1 bar # rev 4 bar -> 1
136 hg tag -fr 0 bar # rev 5 bar -> 0
147 hg tag -fr 0 bar # rev 5 bar -> 0
137 hg tags
148 hg tags
138 hg co 3
149 hg co 3
139 echo barbar > foo
150 echo barbar > foo
140 hg ci -m 'change foo' # rev 6
151 hg ci -m 'change foo' # rev 6
141 hg tags
152 hg tags
142
153
143 echo "% don't allow moving tag without -f"
154 echo "% don't allow moving tag without -f"
144 hg tag -r 3 bar
155 hg tag -r 3 bar
145 hg tags
156 hg tags
146
157
147 echo "% strip 1: expose an old head"
158 echo "% strip 1: expose an old head"
148 hg --config extensions.mq= strip 5 > /dev/null 2>&1
159 hg --config extensions.mq= strip 5 > /dev/null 2>&1
149 hg tags # partly stale cache
160 hg tags # partly stale cache
150 hg tags # up-to-date cache
161 hg tags # up-to-date cache
151 echo "% strip 2: destroy whole branch, no old head exposed"
162 echo "% strip 2: destroy whole branch, no old head exposed"
152 hg --config extensions.mq= strip 4 > /dev/null 2>&1
163 hg --config extensions.mq= strip 4 > /dev/null 2>&1
153 hg tags # partly stale
164 hg tags # partly stale
154 rm -f .hg/tags.cache
165 rm -f .hg/tags.cache
155 hg tags # cold cache
166 hg tags # cold cache
156
167
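# after strip, the cached heads no longer match the repository, so the
# first "hg tags" above sees a partly stale cache and must recover from
# it; deleting the file afterwards exercises the fully cold path as well.
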
echo "% test tag rank with 3 heads"
cd ..
hg init t4
cd t4
echo foo > foo
hg add
hg ci -m 'add foo' # rev 0
hg tag bar # rev 1 bar -> 0
hg tag -f bar # rev 2 bar -> 1
hg up -qC 0
hg tag -fr 2 bar # rev 3 bar -> 2
hg tags
hg up -qC 0
hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
echo "% bar should still point to rev 2"
hg tags


echo "% remove local as global and global as local"
# test that removing global/local tags does not get confused when trying
# to remove a tag of type X which actually only exists as a type Y
cd ..
hg init t5
cd t5
echo foo > foo
hg add
hg ci -m 'add foo' # rev 0

hg tag -r 0 -l localtag
hg tag --remove localtag

hg tag -r 0 globaltag
hg tag --remove -l globaltag
hg tags -v
exit 0
@@ -1,145 +1,151 @@
% setup
no tag cache
000000000000 tip
no tag cache
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
acb14030fe0a tip
tag cache exists
% create local tag with long name
tip 0:acb14030fe0a
This is a local tag with a really long name! 0:acb14030fe0a
% create a tag behind hg's back
acb14030fe0a21b60322c440ad2d20cf7685a376 first
tip 1:b9154636be93
first 0:acb14030fe0a
b9154636be93 tip
b9154636be93 tip
% create a branch
M a
b9154636be93+ tip
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
acb14030fe0a+ first
acb14030fe0a+ first
M a
created new head
c8edf04160c7 tip
% merge the two heads
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
c8edf04160c7+b9154636be93+ tip
M .hgtags
% create fake head, make sure tag not visible afterwards
tip 6:35ff301afafe
first 0:acb14030fe0a
% add invalid tags
committing .hgtags:
acb14030fe0a21b60322c440ad2d20cf7685a376 first
spam

foo bar
% report tag parse error on other head
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
created new head
.hgtags@75d9f02dfe28, line 2: cannot parse entry
.hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
.hgtags@c4be69a18c11, line 2: node 'x' is not well formed
tip 8:c4be69a18c11
first 0:acb14030fe0a
changeset: 8:c4be69a18c11
.hgtags@75d9f02dfe28, line 2: cannot parse entry
.hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
.hgtags@c4be69a18c11, line 2: node 'x' is not well formed
tag: tip
parent: 3:ac5e980c4dc0
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: head

% test tag precedence rules
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
created new head
tip 4:0c192d7d5e6b
bar 1:78391a272241
tip 4:0c192d7d5e6b
bar 1:78391a272241
% detailed dump of tag info
heads:
4:0c192d7d5e6b
3:6fa450212aeb
2:7a94127795a3
rev 2: .hgtags:
bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
rev 3: .hgtags:
bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
78391a272241d70354aa14c874552cad6b51bb42 bar
rev 4: .hgtags:
bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
.hg/tags.cache:
4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
3 6fa450212aeb2a21ed616a54aea39a4a27894cd7 7d3b718c964ef37b89e550ebdafd5789e76ce1b0
2 7a94127795a33c10a370c93f731fd9fea0b79af6 0c04f2a8af31de17fab7422878ee5a2dadbc943d
% test tag removal
changeset: 5:5f6e8655b1c7
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
files: .hgtags
description:
Removed tag bar


diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
--- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
+++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +1,3 @@
bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
+78391a272241d70354aa14c874552cad6b51bb42 bar
+0000000000000000000000000000000000000000 bar

tip 5:5f6e8655b1c7
tip 5:5f6e8655b1c7
% remove nonexistent tag
abort: tag 'foobar' does not exist
changeset: 5:5f6e8655b1c7
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: Removed tag bar

% rollback undoes tag operation
rolling back last transaction
tip 4:0c192d7d5e6b
bar 1:78391a272241
tip 4:0c192d7d5e6b
bar 1:78391a272241
% test tag rank
tip 5:85f05169d91d
bar 0:bbd179dfa0a7
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
created new head
tip 6:735c3ca72986
bar 0:bbd179dfa0a7
% don't allow moving tag without -f
abort: tag 'bar' already exists (use -f to force)
tip 6:735c3ca72986
bar 0:bbd179dfa0a7
% strip 1: expose an old head
tip 5:735c3ca72986
bar 1:78391a272241
tip 5:735c3ca72986
bar 1:78391a272241
% strip 2: destroy whole branch, no old head exposed
tip 4:735c3ca72986
bar 0:bbd179dfa0a7
tip 4:735c3ca72986
bar 0:bbd179dfa0a7
% test tag rank with 3 heads
adding foo
tip 3:197c21bbbf2c
bar 2:6fa450212aeb
% bar should still point to rev 2
tip 4:3b4b14ed0202
bar 2:6fa450212aeb
% remove local as global and global as local
adding foo
abort: tag 'localtag' is not a global tag
abort: tag 'globaltag' is not a local tag
tip 1:a0b6fe111088
localtag 0:bbd179dfa0a7 local
globaltag 0:bbd179dfa0a7