##// END OF EJS Templates
commit: sort subrepos before committing for stable test output
Martin Geisler -
r12127:36a65283 default
parent child Browse files
Show More
@@ -1,1805 +1,1805 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
class localrepository(repo.repository):
    # Protocol capabilities this repository advertises to peers.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    # On-disk format requirements this version knows how to read; a repo
    # whose .hg/requires lists anything else is refused in __init__.
    supported = set('revlogv1 store fncache shared parentdelta'.split())
    def __init__(self, baseui, path=None, create=0):
        """Open the repository at path, or initialize it when create is true.

        baseui: ui instance copied to hold per-repository configuration.
        Raises error.RepoError if the repository is missing (create=0),
        already exists (create=1), or declares an unsupported requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)    # opener for .hg/ files
        self.wopener = util.opener(self.root)   # opener for working-dir files
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # layer the per-repository hgrc on top of the base config;
            # a missing hgrc is not an error
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                # no requires file means an old-style repository; anything
                # else is a real error
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        # honor a shared store (hg share) if .hg/sharedpath exists
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        # weak references to the active transaction/lock/wlock
        self._transref = self._lockref = self._wlockref = None
113
113
    @propertycache
    def changelog(self):
        # Lazily created and cached on first access (propertycache).
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a hook may be running inside an uncommitted transaction;
            # honor pending changelog entries, but only for our own root
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        # Lazily created manifest revlog.
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        # Lazily created working-directory state tracker.
        return dirstate.dirstate(self.opener, self.ui, self.root)
131
131
132 def __getitem__(self, changeid):
132 def __getitem__(self, changeid):
133 if changeid is None:
133 if changeid is None:
134 return context.workingctx(self)
134 return context.workingctx(self)
135 return context.changectx(self, changeid)
135 return context.changectx(self, changeid)
136
136
137 def __contains__(self, changeid):
137 def __contains__(self, changeid):
138 try:
138 try:
139 return bool(self.lookup(changeid))
139 return bool(self.lookup(changeid))
140 except error.RepoLookupError:
140 except error.RepoLookupError:
141 return False
141 return False
142
142
143 def __nonzero__(self):
143 def __nonzero__(self):
144 return True
144 return True
145
145
146 def __len__(self):
146 def __len__(self):
147 return len(self.changelog)
147 return len(self.changelog)
148
148
149 def __iter__(self):
149 def __iter__(self):
150 for i in xrange(len(self)):
150 for i in xrange(len(self)):
151 yield i
151 yield i
152
152
    def url(self):
        # Canonical URL of a local repository.
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        # Run the hooks configured for event `name`; with throw=True a
        # failing hook raises instead of just reporting.
        return hook.hook(self.ui, self, name, throw, **args)

    # Characters that may never appear in a tag name (field separator and
    # line terminators of the tag file format).
    tag_disallowed = ':\r\n'
160
160
161 def _tag(self, names, node, message, local, user, date, extra={}):
161 def _tag(self, names, node, message, local, user, date, extra={}):
162 if isinstance(names, str):
162 if isinstance(names, str):
163 allchars = names
163 allchars = names
164 names = (names,)
164 names = (names,)
165 else:
165 else:
166 allchars = ''.join(names)
166 allchars = ''.join(names)
167 for c in self.tag_disallowed:
167 for c in self.tag_disallowed:
168 if c in allchars:
168 if c in allchars:
169 raise util.Abort(_('%r cannot be used in a tag name') % c)
169 raise util.Abort(_('%r cannot be used in a tag name') % c)
170
170
171 branches = self.branchmap()
171 branches = self.branchmap()
172 for name in names:
172 for name in names:
173 self.hook('pretag', throw=True, node=hex(node), tag=name,
173 self.hook('pretag', throw=True, node=hex(node), tag=name,
174 local=local)
174 local=local)
175 if name in branches:
175 if name in branches:
176 self.ui.warn(_("warning: tag %s conflicts with existing"
176 self.ui.warn(_("warning: tag %s conflicts with existing"
177 " branch name\n") % name)
177 " branch name\n") % name)
178
178
179 def writetags(fp, names, munge, prevtags):
179 def writetags(fp, names, munge, prevtags):
180 fp.seek(0, 2)
180 fp.seek(0, 2)
181 if prevtags and prevtags[-1] != '\n':
181 if prevtags and prevtags[-1] != '\n':
182 fp.write('\n')
182 fp.write('\n')
183 for name in names:
183 for name in names:
184 m = munge and munge(name) or name
184 m = munge and munge(name) or name
185 if self._tagtypes and name in self._tagtypes:
185 if self._tagtypes and name in self._tagtypes:
186 old = self._tags.get(name, nullid)
186 old = self._tags.get(name, nullid)
187 fp.write('%s %s\n' % (hex(old), m))
187 fp.write('%s %s\n' % (hex(old), m))
188 fp.write('%s %s\n' % (hex(node), m))
188 fp.write('%s %s\n' % (hex(node), m))
189 fp.close()
189 fp.close()
190
190
191 prevtags = ''
191 prevtags = ''
192 if local:
192 if local:
193 try:
193 try:
194 fp = self.opener('localtags', 'r+')
194 fp = self.opener('localtags', 'r+')
195 except IOError:
195 except IOError:
196 fp = self.opener('localtags', 'a')
196 fp = self.opener('localtags', 'a')
197 else:
197 else:
198 prevtags = fp.read()
198 prevtags = fp.read()
199
199
200 # local tags are stored in the current charset
200 # local tags are stored in the current charset
201 writetags(fp, names, None, prevtags)
201 writetags(fp, names, None, prevtags)
202 for name in names:
202 for name in names:
203 self.hook('tag', node=hex(node), tag=name, local=local)
203 self.hook('tag', node=hex(node), tag=name, local=local)
204 return
204 return
205
205
206 try:
206 try:
207 fp = self.wfile('.hgtags', 'rb+')
207 fp = self.wfile('.hgtags', 'rb+')
208 except IOError:
208 except IOError:
209 fp = self.wfile('.hgtags', 'ab')
209 fp = self.wfile('.hgtags', 'ab')
210 else:
210 else:
211 prevtags = fp.read()
211 prevtags = fp.read()
212
212
213 # committed tags are stored in UTF-8
213 # committed tags are stored in UTF-8
214 writetags(fp, names, encoding.fromlocal, prevtags)
214 writetags(fp, names, encoding.fromlocal, prevtags)
215
215
216 if '.hgtags' not in self.dirstate:
216 if '.hgtags' not in self.dirstate:
217 self[None].add(['.hgtags'])
217 self[None].add(['.hgtags'])
218
218
219 m = matchmod.exact(self.root, '', ['.hgtags'])
219 m = matchmod.exact(self.root, '', ['.hgtags'])
220 tagnode = self.commit(message, user, date, extra=extra, match=m)
220 tagnode = self.commit(message, user, date, extra=extra, match=m)
221
221
222 for name in names:
222 for name in names:
223 self.hook('tag', node=hex(node), tag=name, local=local)
223 self.hook('tag', node=hex(node), tag=name, local=local)
224
224
225 return tagnode
225 return tagnode
226
226
227 def tag(self, names, node, message, local, user, date):
227 def tag(self, names, node, message, local, user, date):
228 '''tag a revision with one or more symbolic names.
228 '''tag a revision with one or more symbolic names.
229
229
230 names is a list of strings or, when adding a single tag, names may be a
230 names is a list of strings or, when adding a single tag, names may be a
231 string.
231 string.
232
232
233 if local is True, the tags are stored in a per-repository file.
233 if local is True, the tags are stored in a per-repository file.
234 otherwise, they are stored in the .hgtags file, and a new
234 otherwise, they are stored in the .hgtags file, and a new
235 changeset is committed with the change.
235 changeset is committed with the change.
236
236
237 keyword arguments:
237 keyword arguments:
238
238
239 local: whether to store tags in non-version-controlled file
239 local: whether to store tags in non-version-controlled file
240 (default False)
240 (default False)
241
241
242 message: commit message to use if committing
242 message: commit message to use if committing
243
243
244 user: name of user to use if committing
244 user: name of user to use if committing
245
245
246 date: date tuple to use if committing'''
246 date: date tuple to use if committing'''
247
247
248 for x in self.status()[:5]:
248 for x in self.status()[:5]:
249 if '.hgtags' in x:
249 if '.hgtags' in x:
250 raise util.Abort(_('working copy of .hgtags is changed '
250 raise util.Abort(_('working copy of .hgtags is changed '
251 '(please commit .hgtags manually)'))
251 '(please commit .hgtags manually)'))
252
252
253 self.tags() # instantiate the cache
253 self.tags() # instantiate the cache
254 self._tag(names, node, message, local, user, date)
254 self._tag(names, node, message, local, user, date)
255
255
256 def tags(self):
256 def tags(self):
257 '''return a mapping of tag to node'''
257 '''return a mapping of tag to node'''
258 if self._tags is None:
258 if self._tags is None:
259 (self._tags, self._tagtypes) = self._findtags()
259 (self._tags, self._tagtypes) = self._findtags()
260
260
261 return self._tags
261 return self._tags
262
262
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # tags pointing at the null node are skipped
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is synthesized here, never stored
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
295
295
296 def tagtype(self, tagname):
296 def tagtype(self, tagname):
297 '''
297 '''
298 return the type of the given tag. result can be:
298 return the type of the given tag. result can be:
299
299
300 'local' : a local tag
300 'local' : a local tag
301 'global' : a global tag
301 'global' : a global tag
302 None : tag does not exist
302 None : tag does not exist
303 '''
303 '''
304
304
305 self.tags()
305 self.tags()
306
306
307 return self._tagtypes.get(tagname)
307 return self._tagtypes.get(tagname)
308
308
309 def tagslist(self):
309 def tagslist(self):
310 '''return a list of tags ordered by revision'''
310 '''return a list of tags ordered by revision'''
311 l = []
311 l = []
312 for t, n in self.tags().iteritems():
312 for t, n in self.tags().iteritems():
313 try:
313 try:
314 r = self.changelog.rev(n)
314 r = self.changelog.rev(n)
315 except:
315 except:
316 r = -2 # sort to the beginning of the list if unknown
316 r = -2 # sort to the beginning of the list if unknown
317 l.append((r, t, n))
317 l.append((r, t, n))
318 return [(t, n) for r, t, n in sorted(l)]
318 return [(t, n) for r, t, n in sorted(l)]
319
319
320 def nodetags(self, node):
320 def nodetags(self, node):
321 '''return the tags associated with a node'''
321 '''return the tags associated with a node'''
322 if not self.nodetagscache:
322 if not self.nodetagscache:
323 self.nodetagscache = {}
323 self.nodetagscache = {}
324 for t, n in self.tags().iteritems():
324 for t, n in self.tags().iteritems():
325 self.nodetagscache.setdefault(n, []).append(t)
325 self.nodetagscache.setdefault(n, []).append(t)
326 for tags in self.nodetagscache.itervalues():
326 for tags in self.nodetagscache.itervalues():
327 tags.sort()
327 tags.sort()
328 return self.nodetagscache.get(node, [])
328 return self.nodetagscache.get(node, [])
329
329
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring partial (branch -> heads, valid up to revision lrev) up to
        # date with the current tip, then persist it to disk.
        tiprev = len(self) - 1
        if lrev != tiprev:
            # only walk the revisions added since the cache was valid
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        # Refresh the in-memory branch-head cache if the tip has moved.
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. old tip was stripped):
            # reload whatever is on disk
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update starting from the previous tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                # prefer the newest head whose changeset extra does not
                # carry the 'close' marker; fall back to the last head
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
374
374
375
375
    def _readbranchcache(self):
        # Load branchheads.cache from disk.  Returns (partial, last, lrev)
        # where partial maps branch name -> list of head nodes, and
        # last/lrev identify the tip node/revision the cache was valid for.
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # a missing or unreadable cache just means "start from scratch"
            return {}, nullid, nullrev

        try:
            # first line: "<tiphex> <tiprev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<headhex> <branchname>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: report in debug mode and fall
            # back to empty state
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        # Persist the branch-head cache.  Failing to write it only costs
        # performance, so I/O errors are deliberately ignored.
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # Fold the changesets produced by ctxgen into partial (branch
        # name -> list of head nodes), dropping entries that stop being
        # heads.
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # bound the reachability walk at the oldest current head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
                partial[branch] = bheads
438
438
439 def lookup(self, key):
439 def lookup(self, key):
440 if isinstance(key, int):
440 if isinstance(key, int):
441 return self.changelog.node(key)
441 return self.changelog.node(key)
442 elif key == '.':
442 elif key == '.':
443 return self.dirstate.parents()[0]
443 return self.dirstate.parents()[0]
444 elif key == 'null':
444 elif key == 'null':
445 return nullid
445 return nullid
446 elif key == 'tip':
446 elif key == 'tip':
447 return self.changelog.tip()
447 return self.changelog.tip()
448 n = self.changelog._match(key)
448 n = self.changelog._match(key)
449 if n:
449 if n:
450 return n
450 return n
451 if key in self.tags():
451 if key in self.tags():
452 return self.tags()[key]
452 return self.tags()[key]
453 if key in self.branchtags():
453 if key in self.branchtags():
454 return self.branchtags()[key]
454 return self.branchtags()[key]
455 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
456 if n:
456 if n:
457 return n
457 return n
458
458
459 # can't find key, check if it might have come from damaged dirstate
459 # can't find key, check if it might have come from damaged dirstate
460 if key in self.dirstate.parents():
460 if key in self.dirstate.parents():
461 raise error.Abort(_("working directory has unknown parent '%s'!")
461 raise error.Abort(_("working directory has unknown parent '%s'!")
462 % short(key))
462 % short(key))
463 try:
463 try:
464 if len(key) == 20:
464 if len(key) == 20:
465 key = hex(key)
465 key = hex(key)
466 except:
466 except:
467 pass
467 pass
468 raise error.RepoLookupError(_("unknown revision '%s'") % key)
468 raise error.RepoLookupError(_("unknown revision '%s'") % key)
469
469
470 def lookupbranch(self, key, remote=None):
470 def lookupbranch(self, key, remote=None):
471 repo = remote or self
471 repo = remote or self
472 if key in repo.branchmap():
472 if key in repo.branchmap():
473 return key
473 return key
474
474
475 repo = (remote and remote.local()) and remote or self
475 repo = (remote and remote.local()) and remote or self
476 return repo[key].branch()
476 return repo[key].branch()
477
477
    def local(self):
        """True: this is a local (on-disk) repository, not a remote peer."""
        return True

    def join(self, f):
        """Join f onto the .hg metadata directory path."""
        return os.path.join(self.path, f)

    def wjoin(self, f):
        """Join f onto the working directory root."""
        return os.path.join(self.root, f)

    def file(self, f):
        """Return the filelog (per-file revision log) for tracked file f."""
        if f[0] == '/':
            # store paths never start with a slash
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        """Shorthand for self[changeid]."""
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        """Delegate to dirstate.getcwd()."""
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        """Delegate path translation of f (relative to cwd) to the dirstate."""
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        """Open file f from the working directory."""
        return self.wopener(f, mode)

    def _link(self, f):
        """True if working-directory file f is a symbolic link."""
        return os.path.islink(self.wjoin(f))
515
515
def _loadfilter(self, filter):
    """Parse and cache the filter patterns configured under [filter].

    Populates self.filterpats[filter] with a list of
    (matcher, filterfn, params) triples; a cmd of '!' disables a
    pattern. Registered data filters (self._datafilters) are matched
    by command-name prefix; anything else falls back to util.filter
    (an external shell command).
    """
    if filter not in self.filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == '!':
                continue
            mf = matchmod.match(self.root, '', [pat])
            fn = None
            params = cmd
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments.
            # Bind the wrapped function as a default argument: a plain
            # closure over a loop variable is late-binding, so with the
            # old `oldfn = fn` form every wrapper built in this loop
            # ended up calling the *last* filter seen, not its own.
            if not inspect.getargspec(fn)[2]:
                fn = lambda s, c, oldfn=fn, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l
538
538
539 def _filter(self, filter, filename, data):
539 def _filter(self, filter, filename, data):
540 self._loadfilter(filter)
540 self._loadfilter(filter)
541
541
542 for mf, fn, cmd in self.filterpats[filter]:
542 for mf, fn, cmd in self.filterpats[filter]:
543 if mf(filename):
543 if mf(filename):
544 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
544 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
545 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
545 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
546 break
546 break
547
547
548 return data
548 return data
549
549
def adddatafilter(self, name, filter):
    """Register filter under name for use by encode/decode commands."""
    self._datafilters[name] = filter
552
552
def wread(self, filename):
    """Read filename from the working directory and encode-filter it.

    For a symlink the link target string is read instead of file
    contents.
    """
    if self._link(filename):
        raw = os.readlink(self.wjoin(filename))
    else:
        raw = self.wopener(filename, 'r').read()
    return self._filter("encode", filename, raw)
559
559
def wwrite(self, filename, data, flags):
    """Decode-filter data and write it to working-directory file filename.

    flags: 'l' writes a symlink whose target is data; 'x' additionally
    sets the executable bit on a regular file.
    """
    data = self._filter("decode", filename, data)
    # drop any preexisting file/link first; a missing one is fine
    try:
        os.unlink(self.wjoin(filename))
    except OSError:
        pass
    if 'l' in flags:
        self.wopener.symlink(data, filename)
        return
    self.wopener(filename, 'w').write(data)
    if 'x' in flags:
        util.set_flags(self.wjoin(filename), False, True)
572
572
def wwritedata(self, filename, data):
    """Return data as it would be written to the working dir
    (decode-filtered only, nothing touches the filesystem)."""
    return self._filter("decode", filename, data)
575
575
def transaction(self, desc):
    """Open (or nest into) a store transaction described by desc.

    Returns a transaction object; the caller must close/release it.
    """
    # Reuse a running transaction if one is alive; _transref is a
    # weakref, hence the double test before dereferencing.
    tr = self._transref and self._transref() or None
    if tr and tr.running():
        return tr.nest()

    # abort here if the journal already exists
    if os.path.exists(self.sjoin("journal")):
        raise error.RepoError(
            _("abandoned transaction found - run hg recover"))

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        # no dirstate file yet (e.g. fresh repository)
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)
    self.opener("journal.branch", "w").write(self.dirstate.branch())
    # journal.desc records "<current repo length>\n<desc>\n"; rollback()
    # parses this format back out of undo.desc
    self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

    # on successful close the journal.* files are renamed to undo.* so
    # a later rollback() can restore them
    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate")),
               (self.join("journal.branch"), self.join("undo.branch")),
               (self.join("journal.desc"), self.join("undo.desc"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames),
                                 self.store.createmode)
    # keep only a weak reference so an abandoned transaction can be
    # garbage-collected (and thereby aborted)
    self._transref = weakref.ref(tr)
    return tr
605
605
def recover(self):
    """Roll back an interrupted transaction, if one left a journal.

    Returns True when a journal was found and rolled back, False
    otherwise. Takes the store lock for the duration.
    """
    lock = self.lock()
    try:
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"),
                                 self.ui.warn)
            # drop in-memory state that may reference undone revisions
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
    finally:
        lock.release()
620
620
def rollback(self, dryrun=False):
    """Undo the last transaction, restoring store, dirstate and branch.

    With dryrun, only report what would be rolled back. Returns 1 when
    there is no rollback information, None otherwise.
    """
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            # undo.desc holds "<repo length>\n<desc>\n" as written by
            # transaction(); desc may itself be "<command>: <detail>"
            try:
                args = self.opener("undo.desc", "r").read().splitlines()
                if len(args) >= 3 and self.ui.verbose:
                    desc = _("rolling back to revision %s"
                             " (undo %s: %s)\n") % (
                             int(args[0]) - 1, args[1], args[2])
                elif len(args) >= 2:
                    desc = _("rolling back to revision %s (undo %s)\n") % (
                             int(args[0]) - 1, args[1])
                # NOTE(review): with a readable but <2-line undo.desc,
                # `desc` stays unbound and the status call below would
                # raise NameError — confirm whether that can occur
            except IOError:
                desc = _("rolling back unknown transaction\n")
            self.ui.status(desc)
            if dryrun:
                return
            transaction.rollback(self.sopener, self.sjoin("undo"),
                                 self.ui.warn)
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            try:
                branch = self.opener("undo.branch").read()
                self.dirstate.setbranch(branch)
            except IOError:
                self.ui.warn(_("Named branch could not be reset, "
                               "current branch still is: %s\n")
                             % encoding.tolocal(self.dirstate.branch()))
            # discard caches, reload dirstate, and run post-destroy hooks
            self.invalidate()
            self.dirstate.invalidate()
            self.destroyed()
        else:
            self.ui.warn(_("no rollback information available\n"))
            return 1
    finally:
        release(lock, wlock)
659
659
def invalidatecaches(self):
    """Drop the in-memory derived caches (tags and branch heads)."""
    # _branchcache holds branch names in UTF-8
    for cached in ('_tags', '_tagtypes', 'nodetagscache',
                   '_branchcache', '_branchcachetip'):
        setattr(self, cached, None)
666
666
def invalidate(self):
    """Discard the cached changelog/manifest plus all derived caches."""
    # changelog and manifest are propertycaches living in __dict__;
    # deleting them forces a reload on next access
    for cached in ('changelog', 'manifest'):
        if cached in self.__dict__:
            delattr(self, cached)
    self.invalidatecaches()
672
672
def _lock(self, lockname, wait, releasefn, acquirefn, desc):
    """Acquire the lock file lockname.

    When wait is false, a held lock raises LockHeld immediately;
    otherwise we warn and retry with the ui.timeout (default 600s).
    releasefn/acquirefn are callbacks run on release and after
    acquisition; desc is used in the waiting message. Returns the
    lock object.
    """
    try:
        # timeout 0: try a non-blocking acquire first
        l = lock.lock(lockname, 0, releasefn, desc=desc)
    except error.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
687
687
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)'''
    # reuse an existing lock when the weakref is still alive and held;
    # re-entering via l.lock() presumably nests the acquisition —
    # verify against lock.lock
    l = self._lockref and self._lockref()
    if l is not None and l.held:
        l.lock()
        return l

    # acquirefn=self.invalidate: drop stale in-memory state once we
    # own the store, since another process may have changed it
    l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                   _('repository %s') % self.origroot)
    self._lockref = weakref.ref(l)
    return l
701
701
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.
    Use this before modifying files in .hg.'''
    # reuse an existing working-dir lock if still alive and held
    l = self._wlockref and self._wlockref()
    if l is not None and l.held:
        l.lock()
        return l

    # releasefn writes the dirstate out on unlock; acquirefn discards
    # any cached dirstate that another process may have changed
    l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                   self.dirstate.invalidate, _('working directory of %s') %
                   self.origroot)
    self._wlockref = weakref.ref(l)
    return l
716
716
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx: the file context being committed; manifest1/manifest2: the
    manifests of the commit's parents; linkrev: the changelog revision
    the new filelog entry will link to; tr: the running transaction;
    changelist: list to which the filename is appended when the file
    actually changes. Returns the resulting filenode (a parent node is
    reused when only flags changed or nothing changed).
    """

    fname = fctx.path()
    text = fctx.data()
    flog = self.file(fname)
    # nullid parents mean "absent from that manifest"
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = fparent2o = manifest2.get(fname, nullid)

    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3 rev1 changes file foo
        # \ / rev2 renames foo to bar and changes it
        # \- 2 -/ rev3 should have bar with all changes and
        # should record that bar descends from
        # bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3 rev4 reverts the content change from rev2
        # \ / merging rev3 and rev4 should use bar@rev2
        # \- 2 --- 4 as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # find source in nearest ancestor if we've lost track
        if not crev:
            self.ui.debug(" %s: searching for copy revision for %s\n" %
                          (fname, cfname))
            for ancestor in self['.'].ancestors():
                if cfname in ancestor:
                    crev = ancestor[cfname].filenode()
                    break

        self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
        meta["copy"] = cfname
        meta["copyrev"] = hex(crev)
        fparent1, fparent2 = nullid, newfparent
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestor = flog.ancestor(fparent1, fparent2)
        if fparentancestor == fparent1:
            fparent1, fparent2 = fparent2, nullid
        elif fparentancestor == fparent2:
            fparent2 = nullid

    # is the file changed?
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

    # are just the flags changed during merge?
    # (compare against the original second parent, before the
    # ancestor-collapsing above rewrote fparent2)
    if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
791
791
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra={}):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the new changeset node, or None when there is nothing to
    commit. Raises util.Abort on partial merge commits, unresolved
    merges, or unmatched explicit files.
    """
    # NOTE(review): `extra={}` is a mutable default shared across
    # calls; this body only reads it (extra.get), but confirm no
    # caller mutates the returned-by-reference dict.

    def fail(f, msg):
        raise util.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        # record visited directories so explicit dir patterns can be
        # validated below; reject bad (unmatchable) files outright
        vdirs = []
        match.dir = vdirs.append
        match.bad = fail

    wlock = self.wlock()
    try:
        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if (not force and merge and match and
            (match.files() or match.anypats())):
            raise util.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        # changes is a status tuple; per the in-code comments below,
        # [0]-[2] are the modified/added/removed lists, [3] missing,
        # [6] clean — confirm against self.status()
        changes = self.status(match=match, clean=force)
        if force:
            changes[0].extend(changes[6]) # mq may commit unchanged files

        # check subrepos
        subs = []
        removedsubs = set()
        for p in wctx.parents():
            removedsubs.update(s for s in p.substate if match(s))
        for s in wctx.substate:
            removedsubs.discard(s)
            if match(s) and wctx.sub(s).dirty():
                subs.append(s)
        if (subs or removedsubs):
            if (not match('.hgsub') and
                '.hgsub' in (wctx.modified() + wctx.added())):
                raise util.Abort(_("can't commit subrepos without .hgsub"))
            if '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

        # make sure all explicit patterns are matched
        if not force and match.files():
            matched = set(changes[0] + changes[1] + changes[2])

            for f in match.files():
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in changes[3]: # missing
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

        # nothing to commit: no file changes, no branch close, no
        # merge, and no branch name change
        if (not force and not extra.get("close") and not merge
            and not (changes[0] or changes[1] or changes[2])
            and wctx.branch() == wctx.p1().branch()):
            return None

        ms = mergemod.mergestate(self)
        for f in changes[0]:
            if f in ms and ms[f] == 'u':
                raise util.Abort(_("unresolved merge conflicts "
                                   "(see hg resolve)"))

        cctx = context.workingctx(self, text, user, date, extra, changes)
        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # commit subs (sorted for stable, testable output order)
        if subs or removedsubs:
            state = wctx.substate.copy()
            for s in sorted(subs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                    subrepo.relpath(sub))
                sr = sub.commit(cctx._text, user, date)
                state[s] = (state[s][0], sr)
            subrepo.writestate(self, state)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook). Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfile = self.opener('last-message.txt', 'wb')
        msgfile.write(cctx._text)
        msgfile.close()

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
            ret = self.commitctx(cctx, True)
        except:
            # on any failure, tell an editing user where their
            # message was saved, then re-raise
            if edited:
                msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise

        # update dirstate and mergestate
        for f in changes[0] + changes[1]:
            self.dirstate.normal(f)
        for f in changes[2]:
            self.dirstate.forget(f)
        self.dirstate.setparents(ret)
        ms.reset()
    finally:
        wlock.release()

    # run the commit hook after the wlock is dropped
    self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
    return ret
919
919
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    When error is true, IOErrors while reading a file are fatal;
    otherwise a missing (ENOENT) file is treated as removed.
    Returns the new changelog node.
    """

    tr = lock = None
    removed = ctx.removed()
    p1, p2 = ctx.p1(), ctx.p2()
    m1 = p1.manifest().copy()
    m2 = p2.manifest()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction("commit")
        # a weak proxy keeps the filelogs/manifest from pinning the
        # transaction alive
        trp = weakref.proxy(tr)

        # check in files
        new = {}
        changed = []
        linkrev = len(self)
        for f in sorted(ctx.modified() + ctx.added()):
            self.ui.note(f + "\n")
            try:
                fctx = ctx[f]
                new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                          changed)
                m1.set(f, fctx.flags())
            except OSError, inst:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise
            except IOError, inst:
                errcode = getattr(inst, 'errno', errno.ENOENT)
                if error or errcode and errcode != errno.ENOENT:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # file vanished from the working dir: record as removed
                    removed.append(f)

        # update manifest
        m1.update(new)
        # only report files that were actually present in a parent
        removed = [f for f in sorted(removed) if f in m1 or f in m2]
        drop = [f for f in removed if f in m1]
        for f in drop:
            del m1[f]
        mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                               p2.manifestnode(), (new, drop))

        # update changelog
        self.changelog.delayupdate()
        n = self.changelog.add(mn, changed + removed, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        # `p` lets the pretxncommit hook see the pending changelog;
        # it returns the repo root when pending data was written
        p = lambda: self.changelog.writepending() and self.root or ""
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2, pending=p)
        self.changelog.finalize(trp)
        tr.close()

        if self._branchcache:
            self.updatebranchcache()
        return n
    finally:
        if tr:
            tr.release()
        lock.release()
987
987
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.'''
    # We can't take the list of destroyed nodes here — rollback() has
    # no easy way to produce one.
    #
    # Refreshing the derived caches immediately means the persistent
    # tag cache only ever has to cope with destroyed heads right after
    # a strip/rollback, which in turn keeps "cachetip == currenttip"
    # (by rev and node) equivalent to "nothing added or destroyed".
    #
    # This is somewhat wasteful for qrefresh (strip the current head,
    # refresh, then immediately add a new head), but it is what makes
    # instant tag-cache retrieval work.
    self.invalidatecaches()
1006
1006
def walk(self, match, node=None):
    """Recursively find all files in changeset node (or the directory
    tree) that the match function accepts."""
    ctx = self[node]
    return ctx.walk(match)
1014
1014
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean).
    The ignored/clean/unknown lists are only populated when the
    corresponding flag argument is True.
    """

    # restrict a manifest to the files accepted by 'match'
    def mfmatches(ctx):
        mf = ctx.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    # node1/node2 may already be context objects; otherwise look them up
    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    # a working-directory context has no revision number
    working = ctx2.rev() is None
    # fast path: comparing the working dir against its own parent
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())
    listignored, listclean, listunknown = ignored, clean, unknown

    # load earliest manifest first for caching reasons
    if not working and ctx2.rev() < ctx1.rev():
        ctx2.manifest()

    if not parentworking:
        # only warn about bad (unmatchable) files that are not simply
        # absent from the older context
        def bad(f, msg):
            if f not in ctx1:
                self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
        match.bad = bad

    if working: # we need to scan the working dir
        subrepos = []
        if '.hgsub' in self.dirstate:
            subrepos = ctx1.substate.keys()
        s = self.dirstate.status(match, subrepos, listignored,
                                 listclean, listunknown)
        # NOTE: 'cmp' is the list of files whose status the dirstate
        # could not decide from metadata alone (content compare needed);
        # the names shadow builtins but match the historical convention
        cmp, modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if parentworking and cmp:
            fixup = []
            # do a full compare of any files that might have changed
            for f in sorted(cmp):
                if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    or ctx1[f].cmp(ctx2[f])):
                    modified.append(f)
                else:
                    fixup.append(f)

            # update dirstate for files that are actually clean
            if fixup:
                if listclean:
                    clean += fixup

                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                    try:
                        for f in fixup:
                            self.dirstate.normal(f)
                    finally:
                        wlock.release()
                except error.LockError:
                    # best-effort: another process holds the lock,
                    # skip the dirstate refresh
                    pass

    if not parentworking:
        mf1 = mfmatches(ctx1)
        if working:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self['.'])
            for f in cmp + modified + added:
                # None marks "hash unknown, compare contents later"
                mf2[f] = None
                mf2.set(f, ctx2.flags(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(ctx2)

        # walk mf2, consuming mf1 as we go; whatever survives in mf1
        # at the end exists only in ctx1, i.e. was removed
        modified, added, clean = [], [], []
        for fn in mf2:
            if fn in mf1:
                if (mf1.flags(fn) != mf2.flags(fn) or
                    (mf1[fn] != mf2[fn] and
                     (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)
        removed = mf1.keys()

    r = modified, added, removed, deleted, unknown, ignored, clean
    # sort in place for stable output (comprehension used for side effect)
    [l.sort() for l in r]
    return r
1124
1124
def heads(self, start=None):
    '''Return the repository heads (optionally limited to descendants
    of start), ordered from highest to lowest revision number.'''
    found = self.changelog.heads(start)
    rev = self.changelog.rev
    # decorate-sort-undecorate: negating the rev yields descending order
    decorated = sorted((-rev(h), h) for h in found)
    return [node for _negrev, node in decorated]
1130
1130
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        # default to the branch of the working directory
        branch = self[None].branch()
    branches = self.branchmap()
    if branch not in branches:
        return []
    # the cache returns heads ordered lowest to highest
    bheads = list(reversed(branches[branch]))
    if start is not None:
        # filter out the heads that cannot be reached from startrev;
        # nodesbetween()[2] is the reachable-heads component
        fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in fbheads]
    if not closed:
        # changelog.read(h)[5] is per-changeset extra metadata;
        # presumably a 'close' key there marks a closed branch head —
        # confirm against changelog.read()
        bheads = [h for h in bheads if
                  ('close' not in self.changelog.read(h)[5])]
    return bheads
1154
1154
def branches(self, nodes):
    '''For each given node (default: the changelog tip), follow first
    parents back until a merge or a root changeset is reached, and
    return a list of (tip, branchpoint, p1, p2) tuples.'''
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for tip in nodes:
        cur = tip
        while True:
            parents = self.changelog.parents(cur)
            # stop at a merge (real second parent) or at a root
            if parents[1] != nullid or parents[0] == nullid:
                result.append((tip, cur, parents[0], parents[1]))
                break
            cur = parents[0]
    return result
1168
1168
def between(self, pairs):
    '''For each (top, bottom) pair, walk first parents from top
    toward bottom and collect the nodes found at exponentially
    growing distances 1, 2, 4, ... from top.  Returns one list of
    nodes per input pair.'''
    result = []

    for top, bottom in pairs:
        chain = []
        node = top
        step = 0
        nextmark = 1

        while node != bottom and node != nullid:
            parent = self.changelog.parents(node)[0]
            if step == nextmark:
                chain.append(node)
                nextmark *= 2
            node = parent
            step += 1

        result.append(chain)

    return result
1187
1187
def pull(self, remote, heads=None, force=False):
    """Pull changesets from remote into this repository.

    heads, when given, restricts the pull to ancestors of those
    nodes; force allows pulling unrelated changesets.  Returns 0
    when there is nothing to fetch, otherwise the return value of
    addchangegroup().
    """
    # hold the repo lock for the whole discovery + apply sequence
    lock = self.lock()
    try:
        tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                           force=force)
        common, fetch, rheads = tmp
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if fetch == [nullid]:
            # remote shares no history with us: full clone-style pull
            self.ui.status(_("requesting all changes\n"))
        elif heads is None and remote.capable('changegroupsubset'):
            # issue1320, avoid a race if remote changed after discovery
            heads = rheads

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            # a partial pull needs server-side changegroupsubset support
            if not remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
    finally:
        lock.release()
1215
1215
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
      - 0 means HTTP error *or* nothing to push
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    lock = None
    unbundle = remote.capable('unbundle')
    if not unbundle:
        # old-style push: we must hold the remote's lock ourselves
        lock = remote.lock()
    try:
        ret = discovery.prepush(self, remote, force, revs, newbranch)
        if ret[0] is None:
            # and here we return 0 for "nothing to push" or 1 for
            # "something to push but I refuse"
            return ret[1]

        cg, remote_heads = ret
        if unbundle:
            # local repo finds heads on server, finds out what revs it must
            # push. once revs transferred, if server finds it has
            # different heads (someone else won commit/push race), server
            # aborts.
            if force:
                # 'force' sentinel tells the server to skip the
                # heads-unchanged race check
                remote_heads = ['force']
            # ssh: return remote's addchangegroup()
            # http: return remote's addchangegroup() or 0 for error
            return remote.unbundle(cg, remote_heads, 'push')
        else:
            # we return an integer indicating remote head count change
            return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
    finally:
        if lock is not None:
            lock.release()
1260
1260
def changegroupinfo(self, nodes, source):
    '''Report how many changesets are about to be bundled; with
    --debug, also list each changeset hash.  Verbose output is
    always produced when bundling to a file (source == 'bundle').'''
    ui = self.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
1268
1268
def changegroupsubset(self, bases, heads, source, extranodes=None):
    """Compute a changegroup consisting of all the nodes that are
    descendents of any of the bases and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.

    The caller can specify some nodes that must be included in the
    changegroup using the extranodes argument.  It should be a dict
    where the keys are the filenames (or 1 for the manifest), and the
    values are lists of (node, linknode) tuples, where node is a wanted
    node and linknode is the changelog node that should be transmitted as
    the linkrev.
    """

    # Set up some initial variables
    # Make it easy to refer to self.changelog
    cl = self.changelog
    # Compute the list of changesets in this changegroup.
    # Some bases may turn out to be superfluous, and some heads may be
    # too.  nodesbetween will return the minimal set of bases and heads
    # necessary to re-create the changegroup.
    if not bases:
        bases = [nullid]
    msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

    if extranodes is None:
        # can we go through the fast path ?
        heads.sort()
        allheads = self.heads()
        allheads.sort()
        if heads == allheads:
            # requesting everything up to all heads: no filenode
            # filtering needed, delegate to the simpler _changegroup
            return self._changegroup(msng_cl_lst, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)

    self.changegroupinfo(msng_cl_lst, source)

    # We assume that all ancestors of bases are known
    commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

    # Make it easy to refer to self.manifest
    mnfst = self.manifest
    # We don't know which manifests are missing yet
    msng_mnfst_set = {}
    # Nor do we know which filenodes are missing.
    msng_filenode_set = {}

    junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
    junk = None

    # A changeset always belongs to itself, so the changenode lookup
    # function for a changenode is identity.
    def identity(x):
        return x

    # A function generating function that sets up the initial environment
    # the inner function.
    def filenode_collector(changedfiles):
        # This gathers information from each manifestnode included in the
        # changegroup about which filenodes the manifest node references
        # so we can include those in the changegroup too.
        #
        # It also remembers which changenode each filenode belongs to.  It
        # does this by assuming the a filenode belongs to the changenode
        # the first manifest that references it belongs to.
        def collect_msng_filenodes(mnfstnode):
            r = mnfst.rev(mnfstnode)
            if r - 1 in mnfst.parentrevs(r):
                # If the previous rev is one of the parents,
                # we only need to see a diff.
                deltamf = mnfst.readdelta(mnfstnode)
                # For each line in the delta
                for f, fnode in deltamf.iteritems():
                    # And if the file is in the list of files we care
                    # about.
                    if f in changedfiles:
                        # Get the changenode this manifest belongs to
                        clnode = msng_mnfst_set[mnfstnode]
                        # Create the set of filenodes for the file if
                        # there isn't one already.
                        ndset = msng_filenode_set.setdefault(f, {})
                        # And set the filenode's changelog node to the
                        # manifest's if it hasn't been set already.
                        ndset.setdefault(fnode, clnode)
            else:
                # Otherwise we need a full manifest.
                m = mnfst.read(mnfstnode)
                # For every file in we care about.
                for f in changedfiles:
                    fnode = m.get(f, None)
                    # If it's in the manifest
                    if fnode is not None:
                        # See comments above.
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        ndset.setdefault(fnode, clnode)
        return collect_msng_filenodes

    # If we determine that a particular file or manifest node must be a
    # node that the recipient of the changegroup will already have, we can
    # also assume the recipient will have all the parents.  This function
    # prunes them from the set of missing nodes.
    def prune(revlog, missingnodes):
        hasset = set()
        # If a 'missing' filenode thinks it belongs to a changenode we
        # assume the recipient must have, then the recipient must have
        # that filenode.
        for n in missingnodes:
            clrev = revlog.linkrev(revlog.rev(n))
            if clrev in commonrevs:
                hasset.add(n)
        for n in hasset:
            missingnodes.pop(n, None)
        for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
            missingnodes.pop(revlog.node(r), None)

    # Add the nodes that were explicitly requested.
    def add_extra_nodes(name, nodes):
        if not extranodes or name not in extranodes:
            return

        for node, linknode in extranodes[name]:
            if node not in nodes:
                nodes[node] = linknode

    # Now that we have all theses utility functions to help out and
    # logically divide up the task, generate the group.
    # NOTE(review): gengroup mutates msng_mnfst_set / msng_filenode_set
    # via the closures above while yielding chunks; the changelog pass
    # must run to completion before the manifest pass is meaningful.
    def gengroup():
        # The set of changed files starts empty.
        changedfiles = set()
        collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        group = cl.group(msng_cl_lst, identity, collect)
        for cnt, chnk in enumerate(group):
            yield chnk
            self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
        self.ui.progress(_('bundling changes'), None)

        prune(mnfst, msng_mnfst_set)
        add_extra_nodes(1, msng_mnfst_set)
        msng_mnfst_lst = msng_mnfst_set.keys()
        # Sort the manifestnodes by revision number.
        msng_mnfst_lst.sort(key=mnfst.rev)
        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        group = mnfst.group(msng_mnfst_lst,
                            lambda mnode: msng_mnfst_set[mnode],
                            filenode_collector(changedfiles))
        for cnt, chnk in enumerate(group):
            yield chnk
            self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
        self.ui.progress(_('bundling manifests'), None)

        # These are no longer needed, dereference and toss the memory for
        # them.
        msng_mnfst_lst = None
        msng_mnfst_set.clear()

        if extranodes:
            for fname in extranodes:
                # key 1 (int) is the manifest entry, already handled above
                if isinstance(fname, int):
                    continue
                msng_filenode_set.setdefault(fname, {})
                changedfiles.add(fname)
        # Go through all our files in order sorted by name.
        cnt = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            # Toss out the filenodes that the recipient isn't really
            # missing.
            missingfnodes = msng_filenode_set.pop(fname, {})
            prune(filerevlog, missingfnodes)
            add_extra_nodes(fname, missingfnodes)
            # If any filenodes are left, generate the group for them,
            # otherwise don't bother.
            if missingfnodes:
                yield changegroup.chunkheader(len(fname))
                yield fname
                # Sort the filenodes by their revision # (topological order)
                nodeiter = list(missingfnodes)
                nodeiter.sort(key=filerevlog.rev)
                # Create a group generator and only pass in a changenode
                # lookup function as we need to collect no information
                # from filenodes.
                group = filerevlog.group(nodeiter,
                                         lambda fnode: missingfnodes[fnode])
                for chnk in group:
                    self.ui.progress(
                        _('bundling files'), cnt, item=fname, unit=_('chunks'))
                    cnt += 1
                    yield chnk
        # Signal that no more groups are left.
        yield changegroup.closechunk()
        self.ui.progress(_('bundling files'), None)

    if msng_cl_lst:
        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

    return util.chunkbuffer(gengroup())
1480
1480
def changegroup(self, basenodes, source):
    '''Build a changegroup of all nodes a recipient with heads
    ``basenodes`` is missing.

    Delegates to changegroupsubset() rather than _changegroup() so
    that a concurrent commit cannot slip extra changesets into the
    bundle (issue1320).
    '''
    allheads = self.heads()
    return self.changegroupsubset(basenodes, allheads, source)
1484
1484
1485 def _changegroup(self, nodes, source):
1485 def _changegroup(self, nodes, source):
1486 """Compute the changegroup of all nodes that we have that a recipient
1486 """Compute the changegroup of all nodes that we have that a recipient
1487 doesn't. Return a chunkbuffer object whose read() method will return
1487 doesn't. Return a chunkbuffer object whose read() method will return
1488 successive changegroup chunks.
1488 successive changegroup chunks.
1489
1489
1490 This is much easier than the previous function as we can assume that
1490 This is much easier than the previous function as we can assume that
1491 the recipient has any changenode we aren't sending them.
1491 the recipient has any changenode we aren't sending them.
1492
1492
1493 nodes is the set of nodes to send"""
1493 nodes is the set of nodes to send"""
1494
1494
1495 self.hook('preoutgoing', throw=True, source=source)
1495 self.hook('preoutgoing', throw=True, source=source)
1496
1496
1497 cl = self.changelog
1497 cl = self.changelog
1498 revset = set([cl.rev(n) for n in nodes])
1498 revset = set([cl.rev(n) for n in nodes])
1499 self.changegroupinfo(nodes, source)
1499 self.changegroupinfo(nodes, source)
1500
1500
1501 def identity(x):
1501 def identity(x):
1502 return x
1502 return x
1503
1503
1504 def gennodelst(log):
1504 def gennodelst(log):
1505 for r in log:
1505 for r in log:
1506 if log.linkrev(r) in revset:
1506 if log.linkrev(r) in revset:
1507 yield log.node(r)
1507 yield log.node(r)
1508
1508
1509 def lookuplinkrev_func(revlog):
1509 def lookuplinkrev_func(revlog):
1510 def lookuplinkrev(n):
1510 def lookuplinkrev(n):
1511 return cl.node(revlog.linkrev(revlog.rev(n)))
1511 return cl.node(revlog.linkrev(revlog.rev(n)))
1512 return lookuplinkrev
1512 return lookuplinkrev
1513
1513
1514 def gengroup():
1514 def gengroup():
1515 '''yield a sequence of changegroup chunks (strings)'''
1515 '''yield a sequence of changegroup chunks (strings)'''
1516 # construct a list of all changed files
1516 # construct a list of all changed files
1517 changedfiles = set()
1517 changedfiles = set()
1518 mmfs = {}
1518 mmfs = {}
1519 collect = changegroup.collector(cl, mmfs, changedfiles)
1519 collect = changegroup.collector(cl, mmfs, changedfiles)
1520
1520
1521 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1521 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1522 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1522 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1523 yield chnk
1523 yield chnk
1524 self.ui.progress(_('bundling changes'), None)
1524 self.ui.progress(_('bundling changes'), None)
1525
1525
1526 mnfst = self.manifest
1526 mnfst = self.manifest
1527 nodeiter = gennodelst(mnfst)
1527 nodeiter = gennodelst(mnfst)
1528 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1528 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1529 lookuplinkrev_func(mnfst))):
1529 lookuplinkrev_func(mnfst))):
1530 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1530 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1531 yield chnk
1531 yield chnk
1532 self.ui.progress(_('bundling manifests'), None)
1532 self.ui.progress(_('bundling manifests'), None)
1533
1533
1534 cnt = 0
1534 cnt = 0
1535 for fname in sorted(changedfiles):
1535 for fname in sorted(changedfiles):
1536 filerevlog = self.file(fname)
1536 filerevlog = self.file(fname)
1537 if not len(filerevlog):
1537 if not len(filerevlog):
1538 raise util.Abort(_("empty or missing revlog for %s") % fname)
1538 raise util.Abort(_("empty or missing revlog for %s") % fname)
1539 nodeiter = gennodelst(filerevlog)
1539 nodeiter = gennodelst(filerevlog)
1540 nodeiter = list(nodeiter)
1540 nodeiter = list(nodeiter)
1541 if nodeiter:
1541 if nodeiter:
1542 yield changegroup.chunkheader(len(fname))
1542 yield changegroup.chunkheader(len(fname))
1543 yield fname
1543 yield fname
1544 lookup = lookuplinkrev_func(filerevlog)
1544 lookup = lookuplinkrev_func(filerevlog)
1545 for chnk in filerevlog.group(nodeiter, lookup):
1545 for chnk in filerevlog.group(nodeiter, lookup):
1546 self.ui.progress(
1546 self.ui.progress(
1547 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1547 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1548 cnt += 1
1548 cnt += 1
1549 yield chnk
1549 yield chnk
1550 self.ui.progress(_('bundling files'), None)
1550 self.ui.progress(_('bundling files'), None)
1551
1551
1552 yield changegroup.closechunk()
1552 yield changegroup.closechunk()
1553
1553
1554 if nodes:
1554 if nodes:
1555 self.hook('outgoing', node=hex(nodes[0]), source=source)
1555 self.hook('outgoing', node=hex(nodes[0]), source=source)
1556
1556
1557 return util.chunkbuffer(gengroup())
1557 return util.chunkbuffer(gengroup())
1558
1558
1559 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1559 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1560 """Add the changegroup returned by source.read() to this repo.
1560 """Add the changegroup returned by source.read() to this repo.
1561 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1561 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1562 the URL of the repo where this changegroup is coming from.
1562 the URL of the repo where this changegroup is coming from.
1563
1563
1564 Return an integer summarizing the change to this repo:
1564 Return an integer summarizing the change to this repo:
1565 - nothing changed or no source: 0
1565 - nothing changed or no source: 0
1566 - more heads than before: 1+added heads (2..n)
1566 - more heads than before: 1+added heads (2..n)
1567 - fewer heads than before: -1-removed heads (-2..-n)
1567 - fewer heads than before: -1-removed heads (-2..-n)
1568 - number of heads stays the same: 1
1568 - number of heads stays the same: 1
1569 """
1569 """
1570 def csmap(x):
1570 def csmap(x):
1571 self.ui.debug("add changeset %s\n" % short(x))
1571 self.ui.debug("add changeset %s\n" % short(x))
1572 return len(cl)
1572 return len(cl)
1573
1573
1574 def revmap(x):
1574 def revmap(x):
1575 return cl.rev(x)
1575 return cl.rev(x)
1576
1576
1577 if not source:
1577 if not source:
1578 return 0
1578 return 0
1579
1579
1580 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1580 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1581
1581
1582 changesets = files = revisions = 0
1582 changesets = files = revisions = 0
1583 efiles = set()
1583 efiles = set()
1584
1584
1585 # write changelog data to temp files so concurrent readers will not see
1585 # write changelog data to temp files so concurrent readers will not see
1586 # inconsistent view
1586 # inconsistent view
1587 cl = self.changelog
1587 cl = self.changelog
1588 cl.delayupdate()
1588 cl.delayupdate()
1589 oldheads = len(cl.heads())
1589 oldheads = len(cl.heads())
1590
1590
1591 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1591 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1592 try:
1592 try:
1593 trp = weakref.proxy(tr)
1593 trp = weakref.proxy(tr)
1594 # pull off the changeset group
1594 # pull off the changeset group
1595 self.ui.status(_("adding changesets\n"))
1595 self.ui.status(_("adding changesets\n"))
1596 clstart = len(cl)
1596 clstart = len(cl)
1597 class prog(object):
1597 class prog(object):
1598 step = _('changesets')
1598 step = _('changesets')
1599 count = 1
1599 count = 1
1600 ui = self.ui
1600 ui = self.ui
1601 total = None
1601 total = None
1602 def __call__(self):
1602 def __call__(self):
1603 self.ui.progress(self.step, self.count, unit=_('chunks'),
1603 self.ui.progress(self.step, self.count, unit=_('chunks'),
1604 total=self.total)
1604 total=self.total)
1605 self.count += 1
1605 self.count += 1
1606 pr = prog()
1606 pr = prog()
1607 chunkiter = changegroup.chunkiter(source, progress=pr)
1607 chunkiter = changegroup.chunkiter(source, progress=pr)
1608 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1608 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1609 raise util.Abort(_("received changelog group is empty"))
1609 raise util.Abort(_("received changelog group is empty"))
1610 clend = len(cl)
1610 clend = len(cl)
1611 changesets = clend - clstart
1611 changesets = clend - clstart
1612 for c in xrange(clstart, clend):
1612 for c in xrange(clstart, clend):
1613 efiles.update(self[c].files())
1613 efiles.update(self[c].files())
1614 efiles = len(efiles)
1614 efiles = len(efiles)
1615 self.ui.progress(_('changesets'), None)
1615 self.ui.progress(_('changesets'), None)
1616
1616
1617 # pull off the manifest group
1617 # pull off the manifest group
1618 self.ui.status(_("adding manifests\n"))
1618 self.ui.status(_("adding manifests\n"))
1619 pr.step = _('manifests')
1619 pr.step = _('manifests')
1620 pr.count = 1
1620 pr.count = 1
1621 pr.total = changesets # manifests <= changesets
1621 pr.total = changesets # manifests <= changesets
1622 chunkiter = changegroup.chunkiter(source, progress=pr)
1622 chunkiter = changegroup.chunkiter(source, progress=pr)
1623 # no need to check for empty manifest group here:
1623 # no need to check for empty manifest group here:
1624 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1624 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1625 # no new manifest will be created and the manifest group will
1625 # no new manifest will be created and the manifest group will
1626 # be empty during the pull
1626 # be empty during the pull
1627 self.manifest.addgroup(chunkiter, revmap, trp)
1627 self.manifest.addgroup(chunkiter, revmap, trp)
1628 self.ui.progress(_('manifests'), None)
1628 self.ui.progress(_('manifests'), None)
1629
1629
1630 needfiles = {}
1630 needfiles = {}
1631 if self.ui.configbool('server', 'validate', default=False):
1631 if self.ui.configbool('server', 'validate', default=False):
1632 # validate incoming csets have their manifests
1632 # validate incoming csets have their manifests
1633 for cset in xrange(clstart, clend):
1633 for cset in xrange(clstart, clend):
1634 mfest = self.changelog.read(self.changelog.node(cset))[0]
1634 mfest = self.changelog.read(self.changelog.node(cset))[0]
1635 mfest = self.manifest.readdelta(mfest)
1635 mfest = self.manifest.readdelta(mfest)
1636 # store file nodes we must see
1636 # store file nodes we must see
1637 for f, n in mfest.iteritems():
1637 for f, n in mfest.iteritems():
1638 needfiles.setdefault(f, set()).add(n)
1638 needfiles.setdefault(f, set()).add(n)
1639
1639
1640 # process the files
1640 # process the files
1641 self.ui.status(_("adding file changes\n"))
1641 self.ui.status(_("adding file changes\n"))
1642 pr.step = 'files'
1642 pr.step = 'files'
1643 pr.count = 1
1643 pr.count = 1
1644 pr.total = efiles
1644 pr.total = efiles
1645 while 1:
1645 while 1:
1646 f = changegroup.getchunk(source)
1646 f = changegroup.getchunk(source)
1647 if not f:
1647 if not f:
1648 break
1648 break
1649 self.ui.debug("adding %s revisions\n" % f)
1649 self.ui.debug("adding %s revisions\n" % f)
1650 pr()
1650 pr()
1651 fl = self.file(f)
1651 fl = self.file(f)
1652 o = len(fl)
1652 o = len(fl)
1653 chunkiter = changegroup.chunkiter(source)
1653 chunkiter = changegroup.chunkiter(source)
1654 if fl.addgroup(chunkiter, revmap, trp) is None:
1654 if fl.addgroup(chunkiter, revmap, trp) is None:
1655 raise util.Abort(_("received file revlog group is empty"))
1655 raise util.Abort(_("received file revlog group is empty"))
1656 revisions += len(fl) - o
1656 revisions += len(fl) - o
1657 files += 1
1657 files += 1
1658 if f in needfiles:
1658 if f in needfiles:
1659 needs = needfiles[f]
1659 needs = needfiles[f]
1660 for new in xrange(o, len(fl)):
1660 for new in xrange(o, len(fl)):
1661 n = fl.node(new)
1661 n = fl.node(new)
1662 if n in needs:
1662 if n in needs:
1663 needs.remove(n)
1663 needs.remove(n)
1664 if not needs:
1664 if not needs:
1665 del needfiles[f]
1665 del needfiles[f]
1666 self.ui.progress(_('files'), None)
1666 self.ui.progress(_('files'), None)
1667
1667
1668 for f, needs in needfiles.iteritems():
1668 for f, needs in needfiles.iteritems():
1669 fl = self.file(f)
1669 fl = self.file(f)
1670 for n in needs:
1670 for n in needs:
1671 try:
1671 try:
1672 fl.rev(n)
1672 fl.rev(n)
1673 except error.LookupError:
1673 except error.LookupError:
1674 raise util.Abort(
1674 raise util.Abort(
1675 _('missing file data for %s:%s - run hg verify') %
1675 _('missing file data for %s:%s - run hg verify') %
1676 (f, hex(n)))
1676 (f, hex(n)))
1677
1677
1678 newheads = len(cl.heads())
1678 newheads = len(cl.heads())
1679 heads = ""
1679 heads = ""
1680 if oldheads and newheads != oldheads:
1680 if oldheads and newheads != oldheads:
1681 heads = _(" (%+d heads)") % (newheads - oldheads)
1681 heads = _(" (%+d heads)") % (newheads - oldheads)
1682
1682
1683 self.ui.status(_("added %d changesets"
1683 self.ui.status(_("added %d changesets"
1684 " with %d changes to %d files%s\n")
1684 " with %d changes to %d files%s\n")
1685 % (changesets, revisions, files, heads))
1685 % (changesets, revisions, files, heads))
1686
1686
1687 if changesets > 0:
1687 if changesets > 0:
1688 p = lambda: cl.writepending() and self.root or ""
1688 p = lambda: cl.writepending() and self.root or ""
1689 self.hook('pretxnchangegroup', throw=True,
1689 self.hook('pretxnchangegroup', throw=True,
1690 node=hex(cl.node(clstart)), source=srctype,
1690 node=hex(cl.node(clstart)), source=srctype,
1691 url=url, pending=p)
1691 url=url, pending=p)
1692
1692
1693 # make changelog see real files again
1693 # make changelog see real files again
1694 cl.finalize(trp)
1694 cl.finalize(trp)
1695
1695
1696 tr.close()
1696 tr.close()
1697 finally:
1697 finally:
1698 tr.release()
1698 tr.release()
1699 if lock:
1699 if lock:
1700 lock.release()
1700 lock.release()
1701
1701
1702 if changesets > 0:
1702 if changesets > 0:
1703 # forcefully update the on-disk branch cache
1703 # forcefully update the on-disk branch cache
1704 self.ui.debug("updating the branch cache\n")
1704 self.ui.debug("updating the branch cache\n")
1705 self.updatebranchcache()
1705 self.updatebranchcache()
1706 self.hook("changegroup", node=hex(cl.node(clstart)),
1706 self.hook("changegroup", node=hex(cl.node(clstart)),
1707 source=srctype, url=url)
1707 source=srctype, url=url)
1708
1708
1709 for i in xrange(clstart, clend):
1709 for i in xrange(clstart, clend):
1710 self.hook("incoming", node=hex(cl.node(i)),
1710 self.hook("incoming", node=hex(cl.node(i)),
1711 source=srctype, url=url)
1711 source=srctype, url=url)
1712
1712
1713 # never return 0 here:
1713 # never return 0 here:
1714 if newheads < oldheads:
1714 if newheads < oldheads:
1715 return newheads - oldheads - 1
1715 return newheads - oldheads - 1
1716 else:
1716 else:
1717 return newheads - oldheads + 1
1717 return newheads - oldheads + 1
1718
1718
1719
1719
1720 def stream_in(self, remote):
1720 def stream_in(self, remote):
1721 fp = remote.stream_out()
1721 fp = remote.stream_out()
1722 l = fp.readline()
1722 l = fp.readline()
1723 try:
1723 try:
1724 resp = int(l)
1724 resp = int(l)
1725 except ValueError:
1725 except ValueError:
1726 raise error.ResponseError(
1726 raise error.ResponseError(
1727 _('Unexpected response from remote server:'), l)
1727 _('Unexpected response from remote server:'), l)
1728 if resp == 1:
1728 if resp == 1:
1729 raise util.Abort(_('operation forbidden by server'))
1729 raise util.Abort(_('operation forbidden by server'))
1730 elif resp == 2:
1730 elif resp == 2:
1731 raise util.Abort(_('locking the remote repository failed'))
1731 raise util.Abort(_('locking the remote repository failed'))
1732 elif resp != 0:
1732 elif resp != 0:
1733 raise util.Abort(_('the server sent an unknown error code'))
1733 raise util.Abort(_('the server sent an unknown error code'))
1734 self.ui.status(_('streaming all changes\n'))
1734 self.ui.status(_('streaming all changes\n'))
1735 l = fp.readline()
1735 l = fp.readline()
1736 try:
1736 try:
1737 total_files, total_bytes = map(int, l.split(' ', 1))
1737 total_files, total_bytes = map(int, l.split(' ', 1))
1738 except (ValueError, TypeError):
1738 except (ValueError, TypeError):
1739 raise error.ResponseError(
1739 raise error.ResponseError(
1740 _('Unexpected response from remote server:'), l)
1740 _('Unexpected response from remote server:'), l)
1741 self.ui.status(_('%d files to transfer, %s of data\n') %
1741 self.ui.status(_('%d files to transfer, %s of data\n') %
1742 (total_files, util.bytecount(total_bytes)))
1742 (total_files, util.bytecount(total_bytes)))
1743 start = time.time()
1743 start = time.time()
1744 for i in xrange(total_files):
1744 for i in xrange(total_files):
1745 # XXX doesn't support '\n' or '\r' in filenames
1745 # XXX doesn't support '\n' or '\r' in filenames
1746 l = fp.readline()
1746 l = fp.readline()
1747 try:
1747 try:
1748 name, size = l.split('\0', 1)
1748 name, size = l.split('\0', 1)
1749 size = int(size)
1749 size = int(size)
1750 except (ValueError, TypeError):
1750 except (ValueError, TypeError):
1751 raise error.ResponseError(
1751 raise error.ResponseError(
1752 _('Unexpected response from remote server:'), l)
1752 _('Unexpected response from remote server:'), l)
1753 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1753 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1754 # for backwards compat, name was partially encoded
1754 # for backwards compat, name was partially encoded
1755 ofp = self.sopener(store.decodedir(name), 'w')
1755 ofp = self.sopener(store.decodedir(name), 'w')
1756 for chunk in util.filechunkiter(fp, limit=size):
1756 for chunk in util.filechunkiter(fp, limit=size):
1757 ofp.write(chunk)
1757 ofp.write(chunk)
1758 ofp.close()
1758 ofp.close()
1759 elapsed = time.time() - start
1759 elapsed = time.time() - start
1760 if elapsed <= 0:
1760 if elapsed <= 0:
1761 elapsed = 0.001
1761 elapsed = 0.001
1762 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1762 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1763 (util.bytecount(total_bytes), elapsed,
1763 (util.bytecount(total_bytes), elapsed,
1764 util.bytecount(total_bytes / elapsed)))
1764 util.bytecount(total_bytes / elapsed)))
1765 self.invalidate()
1765 self.invalidate()
1766 return len(self.heads()) + 1
1766 return len(self.heads()) + 1
1767
1767
1768 def clone(self, remote, heads=[], stream=False):
1768 def clone(self, remote, heads=[], stream=False):
1769 '''clone remote repository.
1769 '''clone remote repository.
1770
1770
1771 keyword arguments:
1771 keyword arguments:
1772 heads: list of revs to clone (forces use of pull)
1772 heads: list of revs to clone (forces use of pull)
1773 stream: use streaming clone if possible'''
1773 stream: use streaming clone if possible'''
1774
1774
1775 # now, all clients that can request uncompressed clones can
1775 # now, all clients that can request uncompressed clones can
1776 # read repo formats supported by all servers that can serve
1776 # read repo formats supported by all servers that can serve
1777 # them.
1777 # them.
1778
1778
1779 # if revlog format changes, client will have to check version
1779 # if revlog format changes, client will have to check version
1780 # and format flags on "stream" capability, and use
1780 # and format flags on "stream" capability, and use
1781 # uncompressed only if compatible.
1781 # uncompressed only if compatible.
1782
1782
1783 if stream and not heads and remote.capable('stream'):
1783 if stream and not heads and remote.capable('stream'):
1784 return self.stream_in(remote)
1784 return self.stream_in(remote)
1785 return self.pull(remote, heads)
1785 return self.pull(remote, heads)
1786
1786
1787 def pushkey(self, namespace, key, old, new):
1787 def pushkey(self, namespace, key, old, new):
1788 return pushkey.push(self, namespace, key, old, new)
1788 return pushkey.push(self, namespace, key, old, new)
1789
1789
1790 def listkeys(self, namespace):
1790 def listkeys(self, namespace):
1791 return pushkey.list(self, namespace)
1791 return pushkey.list(self, namespace)
1792
1792
1793 # used to avoid circular references so destructors work
1793 # used to avoid circular references so destructors work
1794 def aftertrans(files):
1794 def aftertrans(files):
1795 renamefiles = [tuple(t) for t in files]
1795 renamefiles = [tuple(t) for t in files]
1796 def a():
1796 def a():
1797 for src, dest in renamefiles:
1797 for src, dest in renamefiles:
1798 util.rename(src, dest)
1798 util.rename(src, dest)
1799 return a
1799 return a
1800
1800
1801 def instance(ui, path, create):
1801 def instance(ui, path, create):
1802 return localrepository(ui, util.drop_scheme('file', path), create)
1802 return localrepository(ui, util.drop_scheme('file', path), create)
1803
1803
1804 def islocal(path):
1804 def islocal(path):
1805 return True
1805 return True
@@ -1,584 +1,584 b''
1 $ rm -rf sub
1 $ rm -rf sub
2 $ mkdir sub
2 $ mkdir sub
3 $ cd sub
3 $ cd sub
4 $ hg init t
4 $ hg init t
5 $ cd t
5 $ cd t
6
6
7 first revision, no sub
7 first revision, no sub
8
8
9 $ echo a > a
9 $ echo a > a
10 $ hg ci -Am0
10 $ hg ci -Am0
11 adding a
11 adding a
12
12
13 add first sub
13 add first sub
14
14
15 $ echo s = s > .hgsub
15 $ echo s = s > .hgsub
16 $ hg add .hgsub
16 $ hg add .hgsub
17 $ hg init s
17 $ hg init s
18 $ echo a > s/a
18 $ echo a > s/a
19
19
20 issue2232 - committing a subrepo without .hgsub
20 issue2232 - committing a subrepo without .hgsub
21
21
22 $ hg ci -mbad s
22 $ hg ci -mbad s
23 abort: can't commit subrepos without .hgsub
23 abort: can't commit subrepos without .hgsub
24
24
25 $ hg -R s ci -Ams0
25 $ hg -R s ci -Ams0
26 adding a
26 adding a
27 $ hg sum
27 $ hg sum
28 parent: 0:f7b1eb17ad24 tip
28 parent: 0:f7b1eb17ad24 tip
29 0
29 0
30 branch: default
30 branch: default
31 commit: 1 added, 1 subrepos
31 commit: 1 added, 1 subrepos
32 update: (current)
32 update: (current)
33 $ hg ci -m1
33 $ hg ci -m1
34 committing subrepository s
34 committing subrepository s
35
35
36 issue 2022 - update -C
36 issue 2022 - update -C
37
37
38 $ echo b > s/a
38 $ echo b > s/a
39 $ hg sum
39 $ hg sum
40 parent: 1:7cf8cfea66e4 tip
40 parent: 1:7cf8cfea66e4 tip
41 1
41 1
42 branch: default
42 branch: default
43 commit: 1 subrepos
43 commit: 1 subrepos
44 update: (current)
44 update: (current)
45 $ hg co -C 1
45 $ hg co -C 1
46 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
47 $ hg sum
47 $ hg sum
48 parent: 1:7cf8cfea66e4 tip
48 parent: 1:7cf8cfea66e4 tip
49 1
49 1
50 branch: default
50 branch: default
51 commit: (clean)
51 commit: (clean)
52 update: (current)
52 update: (current)
53
53
54 add sub sub
54 add sub sub
55
55
56 $ echo ss = ss > s/.hgsub
56 $ echo ss = ss > s/.hgsub
57 $ hg init s/ss
57 $ hg init s/ss
58 $ echo a > s/ss/a
58 $ echo a > s/ss/a
59 $ hg -R s add s/.hgsub
59 $ hg -R s add s/.hgsub
60 $ hg -R s/ss add s/ss/a
60 $ hg -R s/ss add s/ss/a
61 $ hg sum
61 $ hg sum
62 parent: 1:7cf8cfea66e4 tip
62 parent: 1:7cf8cfea66e4 tip
63 1
63 1
64 branch: default
64 branch: default
65 commit: 1 subrepos
65 commit: 1 subrepos
66 update: (current)
66 update: (current)
67 $ hg ci -m2
67 $ hg ci -m2
68 committing subrepository s
68 committing subrepository s
69 committing subrepository s/ss
69 committing subrepository s/ss
70 $ hg sum
70 $ hg sum
71 parent: 2:df30734270ae tip
71 parent: 2:df30734270ae tip
72 2
72 2
73 branch: default
73 branch: default
74 commit: (clean)
74 commit: (clean)
75 update: (current)
75 update: (current)
76
76
77 bump sub rev
77 bump sub rev
78
78
79 $ echo b > s/a
79 $ echo b > s/a
80 $ hg -R s ci -ms1
80 $ hg -R s ci -ms1
81 $ hg ci -m3
81 $ hg ci -m3
82 committing subrepository s
82 committing subrepository s
83
83
84 leave sub dirty
84 leave sub dirty
85
85
86 $ echo c > s/a
86 $ echo c > s/a
87 $ hg ci -m4
87 $ hg ci -m4
88 committing subrepository s
88 committing subrepository s
89 $ hg tip -R s
89 $ hg tip -R s
90 changeset: 3:1c833a7a9e3a
90 changeset: 3:1c833a7a9e3a
91 tag: tip
91 tag: tip
92 user: test
92 user: test
93 date: Thu Jan 01 00:00:00 1970 +0000
93 date: Thu Jan 01 00:00:00 1970 +0000
94 summary: 4
94 summary: 4
95
95
96
96
97 check caching
97 check caching
98
98
99 $ hg co 0
99 $ hg co 0
100 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
100 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
101 $ hg debugsub
101 $ hg debugsub
102
102
103 restore
103 restore
104
104
105 $ hg co
105 $ hg co
106 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
106 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 $ hg debugsub
107 $ hg debugsub
108 path s
108 path s
109 source s
109 source s
110 revision 1c833a7a9e3a4445c711aaf0f012379cd0d4034e
110 revision 1c833a7a9e3a4445c711aaf0f012379cd0d4034e
111
111
112 new branch for merge tests
112 new branch for merge tests
113
113
114 $ hg co 1
114 $ hg co 1
115 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
115 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
116 $ echo t = t >> .hgsub
116 $ echo t = t >> .hgsub
117 $ hg init t
117 $ hg init t
118 $ echo t > t/t
118 $ echo t > t/t
119 $ hg -R t add t
119 $ hg -R t add t
120 adding t/t
120 adding t/t
121
121
122 5
122 5
123
123
124 $ hg ci -m5 # add sub
124 $ hg ci -m5 # add sub
125 committing subrepository t
125 committing subrepository t
126 created new head
126 created new head
127 $ echo t2 > t/t
127 $ echo t2 > t/t
128
128
129 6
129 6
130
130
131 $ hg st -R s
131 $ hg st -R s
132 $ hg ci -m6 # change sub
132 $ hg ci -m6 # change sub
133 committing subrepository t
133 committing subrepository t
134 $ hg debugsub
134 $ hg debugsub
135 path s
135 path s
136 source s
136 source s
137 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
137 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
138 path t
138 path t
139 source t
139 source t
140 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
140 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
141 $ echo t3 > t/t
141 $ echo t3 > t/t
142
142
143 7
143 7
144
144
145 $ hg ci -m7 # change sub again for conflict test
145 $ hg ci -m7 # change sub again for conflict test
146 committing subrepository t
146 committing subrepository t
147 $ hg rm .hgsub
147 $ hg rm .hgsub
148
148
149 8
149 8
150
150
151 $ hg ci -m8 # remove sub
151 $ hg ci -m8 # remove sub
152
152
153 merge tests
153 merge tests
154
154
155 $ hg co -C 3
155 $ hg co -C 3
156 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
156 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 $ hg merge 5 # test adding
157 $ hg merge 5 # test adding
158 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
159 (branch merge, don't forget to commit)
159 (branch merge, don't forget to commit)
160 $ hg debugsub
160 $ hg debugsub
161 path s
161 path s
162 source s
162 source s
163 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
163 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
164 path t
164 path t
165 source t
165 source t
166 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
166 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
167 $ hg ci -m9
167 $ hg ci -m9
168 created new head
168 created new head
169 $ hg merge 6 --debug # test change
169 $ hg merge 6 --debug # test change
170 searching for copies back to rev 2
170 searching for copies back to rev 2
171 resolving manifests
171 resolving manifests
172 overwrite None partial False
172 overwrite None partial False
173 ancestor 1f14a2e2d3ec local f0d2028bf86d+ remote 1831e14459c4
173 ancestor 1f14a2e2d3ec local f0d2028bf86d+ remote 1831e14459c4
174 .hgsubstate: versions differ -> m
174 .hgsubstate: versions differ -> m
175 updating: .hgsubstate 1/1 files (100.00%)
175 updating: .hgsubstate 1/1 files (100.00%)
176 subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
176 subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
177 subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
177 subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
178 getting subrepo t
178 getting subrepo t
179 resolving manifests
179 resolving manifests
180 overwrite True partial False
180 overwrite True partial False
181 ancestor 60ca1237c194+ local 60ca1237c194+ remote 6747d179aa9a
181 ancestor 60ca1237c194+ local 60ca1237c194+ remote 6747d179aa9a
182 t: remote is newer -> g
182 t: remote is newer -> g
183 updating: t 1/1 files (100.00%)
183 updating: t 1/1 files (100.00%)
184 getting t
184 getting t
185 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
185 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
186 (branch merge, don't forget to commit)
186 (branch merge, don't forget to commit)
187 $ hg debugsub
187 $ hg debugsub
188 path s
188 path s
189 source s
189 source s
190 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
190 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
191 path t
191 path t
192 source t
192 source t
193 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
193 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
194 $ echo conflict > t/t
194 $ echo conflict > t/t
195 $ hg ci -m10
195 $ hg ci -m10
196 committing subrepository t
196 committing subrepository t
197 $ HGMERGE=internal:merge hg merge --debug 7 # test conflict
197 $ HGMERGE=internal:merge hg merge --debug 7 # test conflict
198 searching for copies back to rev 2
198 searching for copies back to rev 2
199 resolving manifests
199 resolving manifests
200 overwrite None partial False
200 overwrite None partial False
201 ancestor 1831e14459c4 local e45c8b14af55+ remote f94576341bcf
201 ancestor 1831e14459c4 local e45c8b14af55+ remote f94576341bcf
202 .hgsubstate: versions differ -> m
202 .hgsubstate: versions differ -> m
203 updating: .hgsubstate 1/1 files (100.00%)
203 updating: .hgsubstate 1/1 files (100.00%)
204 subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
204 subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
205 subrepo t: both sides changed, merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
205 subrepo t: both sides changed, merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
206 merging subrepo t
206 merging subrepo t
207 searching for copies back to rev 2
207 searching for copies back to rev 2
208 resolving manifests
208 resolving manifests
209 overwrite None partial False
209 overwrite None partial False
210 ancestor 6747d179aa9a local 20a0db6fbf6c+ remote 7af322bc1198
210 ancestor 6747d179aa9a local 20a0db6fbf6c+ remote 7af322bc1198
211 t: versions differ -> m
211 t: versions differ -> m
212 preserving t for resolve of t
212 preserving t for resolve of t
213 updating: t 1/1 files (100.00%)
213 updating: t 1/1 files (100.00%)
214 picked tool 'internal:merge' for t (binary False symlink False)
214 picked tool 'internal:merge' for t (binary False symlink False)
215 merging t
215 merging t
216 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
216 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
217 warning: conflicts during merge.
217 warning: conflicts during merge.
218 merging t failed!
218 merging t failed!
219 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
219 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
220 use 'hg resolve' to retry unresolved file merges or 'hg update -C' to abandon
220 use 'hg resolve' to retry unresolved file merges or 'hg update -C' to abandon
221 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
221 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
222 (branch merge, don't forget to commit)
222 (branch merge, don't forget to commit)
223
223
224 should conflict
224 should conflict
225
225
226 $ cat t/t
226 $ cat t/t
227 <<<<<<< local
227 <<<<<<< local
228 conflict
228 conflict
229 =======
229 =======
230 t3
230 t3
231 >>>>>>> other
231 >>>>>>> other
232
232
233 clone
233 clone
234
234
235 $ cd ..
235 $ cd ..
236 $ hg clone t tc
236 $ hg clone t tc
237 updating to branch default
237 updating to branch default
238 pulling subrepo s from .*/sub/t/s
238 pulling subrepo s from .*/sub/t/s
239 requesting all changes
239 requesting all changes
240 adding changesets
240 adding changesets
241 adding manifests
241 adding manifests
242 adding file changes
242 adding file changes
243 added 4 changesets with 5 changes to 3 files
243 added 4 changesets with 5 changes to 3 files
244 pulling subrepo s/ss from .*/sub/t/s/ss
244 pulling subrepo s/ss from .*/sub/t/s/ss
245 requesting all changes
245 requesting all changes
246 adding changesets
246 adding changesets
247 adding manifests
247 adding manifests
248 adding file changes
248 adding file changes
249 added 1 changesets with 1 changes to 1 files
249 added 1 changesets with 1 changes to 1 files
250 pulling subrepo t from .*/sub/t/t
250 pulling subrepo t from .*/sub/t/t
251 requesting all changes
251 requesting all changes
252 adding changesets
252 adding changesets
253 adding manifests
253 adding manifests
254 adding file changes
254 adding file changes
255 added 4 changesets with 4 changes to 1 files (+1 heads)
255 added 4 changesets with 4 changes to 1 files (+1 heads)
256 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
256 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
257 $ cd tc
257 $ cd tc
258 $ hg debugsub
258 $ hg debugsub
259 path s
259 path s
260 source s
260 source s
261 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
261 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
262 path t
262 path t
263 source t
263 source t
264 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
264 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
265
265
266 push
266 push
267
267
268 $ echo bah > t/t
268 $ echo bah > t/t
269 $ hg ci -m11
269 $ hg ci -m11
270 committing subrepository t
270 committing subrepository t
271 $ hg push
271 $ hg push
272 pushing .*sub/t
272 pushing .*sub/t
273 pushing .*sub/t/s/ss
273 pushing .*sub/t/s/ss
274 searching for changes
274 searching for changes
275 no changes found
275 no changes found
276 pushing .*sub/t/s
276 pushing .*sub/t/s
277 searching for changes
277 searching for changes
278 no changes found
278 no changes found
279 pushing .*sub/t/t
279 pushing .*sub/t/t
280 searching for changes
280 searching for changes
281 adding changesets
281 adding changesets
282 adding manifests
282 adding manifests
283 adding file changes
283 adding file changes
284 added 1 changesets with 1 changes to 1 files
284 added 1 changesets with 1 changes to 1 files
285 searching for changes
285 searching for changes
286 adding changesets
286 adding changesets
287 adding manifests
287 adding manifests
288 adding file changes
288 adding file changes
289 added 1 changesets with 1 changes to 1 files
289 added 1 changesets with 1 changes to 1 files
290
290
291 push -f
291 push -f
292
292
293 $ echo bah > s/a
293 $ echo bah > s/a
294 $ hg ci -m12
294 $ hg ci -m12
295 committing subrepository s
295 committing subrepository s
296 $ hg push
296 $ hg push
297 pushing .*sub/t
297 pushing .*sub/t
298 pushing .*sub/t/s/ss
298 pushing .*sub/t/s/ss
299 searching for changes
299 searching for changes
300 no changes found
300 no changes found
301 pushing .*sub/t/s
301 pushing .*sub/t/s
302 searching for changes
302 searching for changes
303 abort: push creates new remote heads on branch 'default'!
303 abort: push creates new remote heads on branch 'default'!
304 (did you forget to merge? use push -f to force)
304 (did you forget to merge? use push -f to force)
305 $ hg push -f
305 $ hg push -f
306 pushing .*sub/t
306 pushing .*sub/t
307 pushing .*sub/t/s/ss
307 pushing .*sub/t/s/ss
308 searching for changes
308 searching for changes
309 no changes found
309 no changes found
310 pushing .*sub/t/s
310 pushing .*sub/t/s
311 searching for changes
311 searching for changes
312 adding changesets
312 adding changesets
313 adding manifests
313 adding manifests
314 adding file changes
314 adding file changes
315 added 1 changesets with 1 changes to 1 files (+1 heads)
315 added 1 changesets with 1 changes to 1 files (+1 heads)
316 pushing .*sub/t/t
316 pushing .*sub/t/t
317 searching for changes
317 searching for changes
318 no changes found
318 no changes found
319 searching for changes
319 searching for changes
320 adding changesets
320 adding changesets
321 adding manifests
321 adding manifests
322 adding file changes
322 adding file changes
323 added 1 changesets with 1 changes to 1 files
323 added 1 changesets with 1 changes to 1 files
324
324
325 update
325 update
326
326
327 $ cd ../t
327 $ cd ../t
328 $ hg up -C # discard our earlier merge
328 $ hg up -C # discard our earlier merge
329 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
329 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
330 $ echo blah > t/t
330 $ echo blah > t/t
331 $ hg ci -m13
331 $ hg ci -m13
332 committing subrepository t
332 committing subrepository t
333
333
334 pull
334 pull
335
335
336 $ cd ../tc
336 $ cd ../tc
337 $ hg pull
337 $ hg pull
338 pulling .*sub/t
338 pulling .*sub/t
339 searching for changes
339 searching for changes
340 adding changesets
340 adding changesets
341 adding manifests
341 adding manifests
342 adding file changes
342 adding file changes
343 added 1 changesets with 1 changes to 1 files
343 added 1 changesets with 1 changes to 1 files
344 (run 'hg update' to get a working copy)
344 (run 'hg update' to get a working copy)
345
345
346 should pull t
346 should pull t
347
347
348 $ hg up
348 $ hg up
349 pulling subrepo t from .*/sub/t/t
349 pulling subrepo t from .*/sub/t/t
350 searching for changes
350 searching for changes
351 adding changesets
351 adding changesets
352 adding manifests
352 adding manifests
353 adding file changes
353 adding file changes
354 added 1 changesets with 1 changes to 1 files
354 added 1 changesets with 1 changes to 1 files
355 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
355 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
356 $ cat t/t
356 $ cat t/t
357 blah
357 blah
358
358
359 bogus subrepo path aborts
359 bogus subrepo path aborts
360
360
361 $ echo 'bogus=[boguspath' >> .hgsub
361 $ echo 'bogus=[boguspath' >> .hgsub
362 $ hg ci -m 'bogus subrepo path'
362 $ hg ci -m 'bogus subrepo path'
363 abort: missing ] in subrepo source
363 abort: missing ] in subrepo source
364
364
365 issue 1986
365 issue 1986
366
366
367 # subrepo layout
367 # subrepo layout
368 #
368 #
369 # o 5 br
369 # o 5 br
370 # /|
370 # /|
371 # o | 4 default
371 # o | 4 default
372 # | |
372 # | |
373 # | o 3 br
373 # | o 3 br
374 # |/|
374 # |/|
375 # o | 2 default
375 # o | 2 default
376 # | |
376 # | |
377 # | o 1 br
377 # | o 1 br
378 # |/
378 # |/
379 # o 0 default
379 # o 0 default
380
380
381 $ cd ..
381 $ cd ..
382 $ rm -rf sub
382 $ rm -rf sub
383 $ hg init main
383 $ hg init main
384 $ cd main
384 $ cd main
385 $ hg init s
385 $ hg init s
386 $ cd s
386 $ cd s
387 $ echo a > a
387 $ echo a > a
388 $ hg ci -Am1
388 $ hg ci -Am1
389 adding a
389 adding a
390 $ hg branch br
390 $ hg branch br
391 marked working directory as branch br
391 marked working directory as branch br
392 $ echo a >> a
392 $ echo a >> a
393 $ hg ci -m1
393 $ hg ci -m1
394 $ hg up default
394 $ hg up default
395 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
395 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
396 $ echo b > b
396 $ echo b > b
397 $ hg ci -Am1
397 $ hg ci -Am1
398 adding b
398 adding b
399 $ hg up br
399 $ hg up br
400 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
400 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
401 $ hg merge tip
401 $ hg merge tip
402 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
402 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
403 (branch merge, don't forget to commit)
403 (branch merge, don't forget to commit)
404 $ hg ci -m1
404 $ hg ci -m1
405 $ hg up 2
405 $ hg up 2
406 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
406 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
407 $ echo c > c
407 $ echo c > c
408 $ hg ci -Am1
408 $ hg ci -Am1
409 adding c
409 adding c
410 $ hg up 3
410 $ hg up 3
411 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
411 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
412 $ hg merge 4
412 $ hg merge 4
413 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
413 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
414 (branch merge, don't forget to commit)
414 (branch merge, don't forget to commit)
415 $ hg ci -m1
415 $ hg ci -m1
416
416
417 # main repo layout:
417 # main repo layout:
418 #
418 #
419 # * <-- try to merge default into br again
419 # * <-- try to merge default into br again
420 # .`|
420 # .`|
421 # . o 5 br --> substate = 5
421 # . o 5 br --> substate = 5
422 # . |
422 # . |
423 # o | 4 default --> substate = 4
423 # o | 4 default --> substate = 4
424 # | |
424 # | |
425 # | o 3 br --> substate = 2
425 # | o 3 br --> substate = 2
426 # |/|
426 # |/|
427 # o | 2 default --> substate = 2
427 # o | 2 default --> substate = 2
428 # | |
428 # | |
429 # | o 1 br --> substate = 3
429 # | o 1 br --> substate = 3
430 # |/
430 # |/
431 # o 0 default --> substate = 2
431 # o 0 default --> substate = 2
432
432
433 $ cd ..
433 $ cd ..
434 $ echo 's = s' > .hgsub
434 $ echo 's = s' > .hgsub
435 $ hg -R s up 2
435 $ hg -R s up 2
436 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
436 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
437 $ hg ci -Am1
437 $ hg ci -Am1
438 adding .hgsub
438 adding .hgsub
439 committing subrepository s
439 committing subrepository s
440 $ hg branch br
440 $ hg branch br
441 marked working directory as branch br
441 marked working directory as branch br
442 $ echo b > b
442 $ echo b > b
443 $ hg -R s up 3
443 $ hg -R s up 3
444 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
444 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
445 $ hg ci -Am1
445 $ hg ci -Am1
446 adding b
446 adding b
447 committing subrepository s
447 committing subrepository s
448 $ hg up default
448 $ hg up default
449 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
449 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
450 $ echo c > c
450 $ echo c > c
451 $ hg ci -Am1
451 $ hg ci -Am1
452 adding c
452 adding c
453 $ hg up 1
453 $ hg up 1
454 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
454 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
455 $ hg merge 2
455 $ hg merge 2
456 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
456 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
457 (branch merge, don't forget to commit)
457 (branch merge, don't forget to commit)
458 $ hg ci -m1
458 $ hg ci -m1
459 $ hg up 2
459 $ hg up 2
460 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
460 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
461 $ hg -R s up 4
461 $ hg -R s up 4
462 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
462 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
463 $ echo d > d
463 $ echo d > d
464 $ hg ci -Am1
464 $ hg ci -Am1
465 adding d
465 adding d
466 committing subrepository s
466 committing subrepository s
467 $ hg up 3
467 $ hg up 3
468 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
468 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
469 $ hg -R s up 5
469 $ hg -R s up 5
470 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
470 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
471 $ echo e > e
471 $ echo e > e
472 $ hg ci -Am1
472 $ hg ci -Am1
473 adding e
473 adding e
474 committing subrepository s
474 committing subrepository s
475
475
476 $ hg up 5
476 $ hg up 5
477 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
477 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
478 $ hg merge 4 # try to merge default into br again
478 $ hg merge 4 # try to merge default into br again
479 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
479 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
480 (branch merge, don't forget to commit)
480 (branch merge, don't forget to commit)
481 $ cd ..
481 $ cd ..
482
482
483 test subrepo delete from .hgsubstate
483 test subrepo delete from .hgsubstate
484
484
485 $ hg init testdelete
485 $ hg init testdelete
486 $ mkdir testdelete/nested testdelete/nested2
486 $ mkdir testdelete/nested testdelete/nested2
487 $ hg init testdelete/nested
487 $ hg init testdelete/nested
488 $ hg init testdelete/nested2
488 $ hg init testdelete/nested2
489 $ echo test > testdelete/nested/foo
489 $ echo test > testdelete/nested/foo
490 $ echo test > testdelete/nested2/foo
490 $ echo test > testdelete/nested2/foo
491 $ hg -R testdelete/nested add
491 $ hg -R testdelete/nested add
492 adding testdelete/nested/foo
492 adding testdelete/nested/foo
493 $ hg -R testdelete/nested2 add
493 $ hg -R testdelete/nested2 add
494 adding testdelete/nested2/foo
494 adding testdelete/nested2/foo
495 $ hg -R testdelete/nested ci -m test
495 $ hg -R testdelete/nested ci -m test
496 $ hg -R testdelete/nested2 ci -m test
496 $ hg -R testdelete/nested2 ci -m test
497 $ echo nested = nested > testdelete/.hgsub
497 $ echo nested = nested > testdelete/.hgsub
498 $ echo nested2 = nested2 >> testdelete/.hgsub
498 $ echo nested2 = nested2 >> testdelete/.hgsub
499 $ hg -R testdelete add
499 $ hg -R testdelete add
500 adding testdelete/.hgsub
500 adding testdelete/.hgsub
501 $ hg -R testdelete ci -m "nested 1 & 2 added"
501 $ hg -R testdelete ci -m "nested 1 & 2 added"
502 committing subrepository nested
502 committing subrepository nested2
503 committing subrepository nested2
503 committing subrepository nested
504 $ echo nested = nested > testdelete/.hgsub
504 $ echo nested = nested > testdelete/.hgsub
505 $ hg -R testdelete ci -m "nested 2 deleted"
505 $ hg -R testdelete ci -m "nested 2 deleted"
506 $ cat testdelete/.hgsubstate
506 $ cat testdelete/.hgsubstate
507 bdf5c9a3103743d900b12ae0db3ffdcfd7b0d878 nested
507 bdf5c9a3103743d900b12ae0db3ffdcfd7b0d878 nested
508 $ hg -R testdelete remove testdelete/.hgsub
508 $ hg -R testdelete remove testdelete/.hgsub
509 $ hg -R testdelete ci -m ".hgsub deleted"
509 $ hg -R testdelete ci -m ".hgsub deleted"
510 $ cat testdelete/.hgsubstate
510 $ cat testdelete/.hgsubstate
511
511
512 test repository cloning
512 test repository cloning
513
513
514 $ mkdir mercurial mercurial2
514 $ mkdir mercurial mercurial2
515 $ hg init nested_absolute
515 $ hg init nested_absolute
516 $ echo test > nested_absolute/foo
516 $ echo test > nested_absolute/foo
517 $ hg -R nested_absolute add
517 $ hg -R nested_absolute add
518 adding nested_absolute/foo
518 adding nested_absolute/foo
519 $ hg -R nested_absolute ci -mtest
519 $ hg -R nested_absolute ci -mtest
520 $ cd mercurial
520 $ cd mercurial
521 $ hg init nested_relative
521 $ hg init nested_relative
522 $ echo test2 > nested_relative/foo2
522 $ echo test2 > nested_relative/foo2
523 $ hg -R nested_relative add
523 $ hg -R nested_relative add
524 adding nested_relative/foo2
524 adding nested_relative/foo2
525 $ hg -R nested_relative ci -mtest2
525 $ hg -R nested_relative ci -mtest2
526 $ hg init main
526 $ hg init main
527 $ echo "nested_relative = ../nested_relative" > main/.hgsub
527 $ echo "nested_relative = ../nested_relative" > main/.hgsub
528 $ echo "nested_absolute = `pwd`/nested_absolute" >> main/.hgsub
528 $ echo "nested_absolute = `pwd`/nested_absolute" >> main/.hgsub
529 $ hg -R main add
529 $ hg -R main add
530 adding main/.hgsub
530 adding main/.hgsub
531 $ hg -R main ci -m "add subrepos"
531 $ hg -R main ci -m "add subrepos"
532 committing subrepository nested_absolute
532 committing subrepository nested_relative
533 committing subrepository nested_relative
533 committing subrepository nested_absolute
534 $ cd ..
534 $ cd ..
535 $ hg clone mercurial/main mercurial2/main
535 $ hg clone mercurial/main mercurial2/main
536 updating to branch default
536 updating to branch default
537 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
537 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
538 $ cat mercurial2/main/nested_absolute/.hg/hgrc \
538 $ cat mercurial2/main/nested_absolute/.hg/hgrc \
539 > mercurial2/main/nested_relative/.hg/hgrc
539 > mercurial2/main/nested_relative/.hg/hgrc
540 [paths]
540 [paths]
541 default = .*/test-subrepo.t/sub/mercurial/nested_absolute
541 default = .*/test-subrepo.t/sub/mercurial/nested_absolute
542 [paths]
542 [paths]
543 default = .*/test-subrepo.t/sub/mercurial/nested_relative
543 default = .*/test-subrepo.t/sub/mercurial/nested_relative
544 $ rm -rf mercurial mercurial2
544 $ rm -rf mercurial mercurial2
545
545
546 issue 1977
546 issue 1977
547
547
548 $ hg init repo
548 $ hg init repo
549 $ hg init repo/s
549 $ hg init repo/s
550 $ echo a > repo/s/a
550 $ echo a > repo/s/a
551 $ hg -R repo/s ci -Am0
551 $ hg -R repo/s ci -Am0
552 adding a
552 adding a
553 $ echo s = s > repo/.hgsub
553 $ echo s = s > repo/.hgsub
554 $ hg -R repo ci -Am1
554 $ hg -R repo ci -Am1
555 adding .hgsub
555 adding .hgsub
556 committing subrepository s
556 committing subrepository s
557 $ hg clone repo repo2
557 $ hg clone repo repo2
558 updating to branch default
558 updating to branch default
559 pulling subrepo s from .*/sub/repo/s
559 pulling subrepo s from .*/sub/repo/s
560 requesting all changes
560 requesting all changes
561 adding changesets
561 adding changesets
562 adding manifests
562 adding manifests
563 adding file changes
563 adding file changes
564 added 1 changesets with 1 changes to 1 files
564 added 1 changesets with 1 changes to 1 files
565 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
565 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
566 $ hg -q -R repo2 pull -u
566 $ hg -q -R repo2 pull -u
567 $ echo 1 > repo2/s/a
567 $ echo 1 > repo2/s/a
568 $ hg -R repo2/s ci -m2
568 $ hg -R repo2/s ci -m2
569 $ hg -q -R repo2/s push
569 $ hg -q -R repo2/s push
570 $ hg -R repo2/s up -C 0
570 $ hg -R repo2/s up -C 0
571 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
571 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
572 $ echo 2 > repo2/s/a
572 $ echo 2 > repo2/s/a
573 $ hg -R repo2/s ci -m3
573 $ hg -R repo2/s ci -m3
574 created new head
574 created new head
575 $ hg -R repo2 ci -m3
575 $ hg -R repo2 ci -m3
576 committing subrepository s
576 committing subrepository s
577 $ hg -q -R repo2 push
577 $ hg -q -R repo2 push
578 abort: push creates new remote heads on branch 'default'!
578 abort: push creates new remote heads on branch 'default'!
579 (did you forget to merge? use push -f to force)
579 (did you forget to merge? use push -f to force)
580 $ hg -R repo update
580 $ hg -R repo update
581 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
581 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
582 $ rm -rf repo2 repo
582 $ rm -rf repo2 repo
583
583
584 $ exit 0
584 $ exit 0
General Comments 0
You need to be logged in to leave comments. Login now