localrepo: cleanup branch tip computation
Benoit Boissinot
r10392:9be6c590 default
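The only hunk in this 2137-line file is in branchtags() (old lines 353-363, new lines 353-359): the index-based scan over heads and the separate "no open heads were found" fallback are replaced by initializing the result to heads[-1] up front and walking the heads with reversed(). A minimal standalone sketch of that selection logic follows; the repository plumbing is stubbed out, and is_closed() is a hypothetical stand-in for the real "'close' in self.changelog.read(h)[5]" check in the diff below.

def branch_tip(heads, is_closed):
    '''Return the tipmost open head of a branch, falling back to the
    tipmost head overall when every head is closed.'''
    tip = heads[-1]            # default already covers the all-closed case
    for h in reversed(heads):  # walk from the tip toward the root
        if not is_closed(h):
            tip = h            # first open head seen from the tip wins
            break
    return tip

# Head 11 is the tipmost open head when 12 and 13 are closed:
closed = set([12, 13])
assert branch_tip([10, 11, 12, 13], lambda h: h in closed) == 11
# With every head closed, the tipmost head is still returned:
assert branch_tip([10, 11], lambda h: True) == 11

Because the fallback and the loop now agree on heads[-1] as the default, the old "if head is None" branch can simply disappear, which is the whole cleanup.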
@@ -1,2137 +1,2133 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92 self.sopener.options = {}
92 self.sopener.options = {}
93
93
94 # These two define the set of tags for this repository. _tags
94 # These two define the set of tags for this repository. _tags
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # 'local'. (Global tags are defined by .hgtags across all
96 # 'local'. (Global tags are defined by .hgtags across all
97 # heads, and local tags are defined in .hg/localtags.) They
97 # heads, and local tags are defined in .hg/localtags.) They
98 # constitute the in-memory cache of tags.
98 # constitute the in-memory cache of tags.
99 self._tags = None
99 self._tags = None
100 self._tagtypes = None
100 self._tagtypes = None
101
101
102 self._branchcache = None # in UTF-8
102 self._branchcache = None # in UTF-8
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.options['defversion'] = c.version
116 self.sopener.options['defversion'] = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __contains__(self, changeid):
132 def __contains__(self, changeid):
133 try:
133 try:
134 return bool(self.lookup(changeid))
134 return bool(self.lookup(changeid))
135 except error.RepoLookupError:
135 except error.RepoLookupError:
136 return False
136 return False
137
137
138 def __nonzero__(self):
138 def __nonzero__(self):
139 return True
139 return True
140
140
141 def __len__(self):
141 def __len__(self):
142 return len(self.changelog)
142 return len(self.changelog)
143
143
144 def __iter__(self):
144 def __iter__(self):
145 for i in xrange(len(self)):
145 for i in xrange(len(self)):
146 yield i
146 yield i
147
147
148 def url(self):
148 def url(self):
149 return 'file:' + self.root
149 return 'file:' + self.root
150
150
151 def hook(self, name, throw=False, **args):
151 def hook(self, name, throw=False, **args):
152 return hook.hook(self.ui, self, name, throw, **args)
152 return hook.hook(self.ui, self, name, throw, **args)
153
153
154 tag_disallowed = ':\r\n'
154 tag_disallowed = ':\r\n'
155
155
156 def _tag(self, names, node, message, local, user, date, extra={}):
156 def _tag(self, names, node, message, local, user, date, extra={}):
157 if isinstance(names, str):
157 if isinstance(names, str):
158 allchars = names
158 allchars = names
159 names = (names,)
159 names = (names,)
160 else:
160 else:
161 allchars = ''.join(names)
161 allchars = ''.join(names)
162 for c in self.tag_disallowed:
162 for c in self.tag_disallowed:
163 if c in allchars:
163 if c in allchars:
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165
165
166 for name in names:
166 for name in names:
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 local=local)
168 local=local)
169
169
170 def writetags(fp, names, munge, prevtags):
170 def writetags(fp, names, munge, prevtags):
171 fp.seek(0, 2)
171 fp.seek(0, 2)
172 if prevtags and prevtags[-1] != '\n':
172 if prevtags and prevtags[-1] != '\n':
173 fp.write('\n')
173 fp.write('\n')
174 for name in names:
174 for name in names:
175 m = munge and munge(name) or name
175 m = munge and munge(name) or name
176 if self._tagtypes and name in self._tagtypes:
176 if self._tagtypes and name in self._tagtypes:
177 old = self._tags.get(name, nullid)
177 old = self._tags.get(name, nullid)
178 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(old), m))
179 fp.write('%s %s\n' % (hex(node), m))
179 fp.write('%s %s\n' % (hex(node), m))
180 fp.close()
180 fp.close()
181
181
182 prevtags = ''
182 prevtags = ''
183 if local:
183 if local:
184 try:
184 try:
185 fp = self.opener('localtags', 'r+')
185 fp = self.opener('localtags', 'r+')
186 except IOError:
186 except IOError:
187 fp = self.opener('localtags', 'a')
187 fp = self.opener('localtags', 'a')
188 else:
188 else:
189 prevtags = fp.read()
189 prevtags = fp.read()
190
190
191 # local tags are stored in the current charset
191 # local tags are stored in the current charset
192 writetags(fp, names, None, prevtags)
192 writetags(fp, names, None, prevtags)
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195 return
195 return
196
196
197 try:
197 try:
198 fp = self.wfile('.hgtags', 'rb+')
198 fp = self.wfile('.hgtags', 'rb+')
199 except IOError:
199 except IOError:
200 fp = self.wfile('.hgtags', 'ab')
200 fp = self.wfile('.hgtags', 'ab')
201 else:
201 else:
202 prevtags = fp.read()
202 prevtags = fp.read()
203
203
204 # committed tags are stored in UTF-8
204 # committed tags are stored in UTF-8
205 writetags(fp, names, encoding.fromlocal, prevtags)
205 writetags(fp, names, encoding.fromlocal, prevtags)
206
206
207 if '.hgtags' not in self.dirstate:
207 if '.hgtags' not in self.dirstate:
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 m = match_.exact(self.root, '', ['.hgtags'])
210 m = match_.exact(self.root, '', ['.hgtags'])
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212
212
213 for name in names:
213 for name in names:
214 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 return tagnode
216 return tagnode
217
217
218 def tag(self, names, node, message, local, user, date):
218 def tag(self, names, node, message, local, user, date):
219 '''tag a revision with one or more symbolic names.
219 '''tag a revision with one or more symbolic names.
220
220
221 names is a list of strings or, when adding a single tag, names may be a
221 names is a list of strings or, when adding a single tag, names may be a
222 string.
222 string.
223
223
224 if local is True, the tags are stored in a per-repository file.
224 if local is True, the tags are stored in a per-repository file.
225 otherwise, they are stored in the .hgtags file, and a new
225 otherwise, they are stored in the .hgtags file, and a new
226 changeset is committed with the change.
226 changeset is committed with the change.
227
227
228 keyword arguments:
228 keyword arguments:
229
229
230 local: whether to store tags in non-version-controlled file
230 local: whether to store tags in non-version-controlled file
231 (default False)
231 (default False)
232
232
233 message: commit message to use if committing
233 message: commit message to use if committing
234
234
235 user: name of user to use if committing
235 user: name of user to use if committing
236
236
237 date: date tuple to use if committing'''
237 date: date tuple to use if committing'''
238
238
239 for x in self.status()[:5]:
239 for x in self.status()[:5]:
240 if '.hgtags' in x:
240 if '.hgtags' in x:
241 raise util.Abort(_('working copy of .hgtags is changed '
241 raise util.Abort(_('working copy of .hgtags is changed '
242 '(please commit .hgtags manually)'))
242 '(please commit .hgtags manually)'))
243
243
244 self.tags() # instantiate the cache
244 self.tags() # instantiate the cache
245 self._tag(names, node, message, local, user, date)
245 self._tag(names, node, message, local, user, date)
246
246
247 def tags(self):
247 def tags(self):
248 '''return a mapping of tag to node'''
248 '''return a mapping of tag to node'''
249 if self._tags is None:
249 if self._tags is None:
250 (self._tags, self._tagtypes) = self._findtags()
250 (self._tags, self._tagtypes) = self._findtags()
251
251
252 return self._tags
252 return self._tags
253
253
254 def _findtags(self):
254 def _findtags(self):
255 '''Do the hard work of finding tags. Return a pair of dicts
255 '''Do the hard work of finding tags. Return a pair of dicts
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
257 maps tag name to a string like \'global\' or \'local\'.
257 maps tag name to a string like \'global\' or \'local\'.
258 Subclasses or extensions are free to add their own tags, but
258 Subclasses or extensions are free to add their own tags, but
259 should be aware that the returned dicts will be retained for the
259 should be aware that the returned dicts will be retained for the
260 duration of the localrepo object.'''
260 duration of the localrepo object.'''
261
261
262 # XXX what tagtype should subclasses/extensions use? Currently
262 # XXX what tagtype should subclasses/extensions use? Currently
263 # mq and bookmarks add tags, but do not set the tagtype at all.
263 # mq and bookmarks add tags, but do not set the tagtype at all.
264 # Should each extension invent its own tag type? Should there
264 # Should each extension invent its own tag type? Should there
265 # be one tagtype for all such "virtual" tags? Or is the status
265 # be one tagtype for all such "virtual" tags? Or is the status
266 # quo fine?
266 # quo fine?
267
267
268 alltags = {} # map tag name to (node, hist)
268 alltags = {} # map tag name to (node, hist)
269 tagtypes = {}
269 tagtypes = {}
270
270
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
273
273
274 # Build the return dicts. Have to re-encode tag names because
274 # Build the return dicts. Have to re-encode tag names because
275 # the tags module always uses UTF-8 (in order not to lose info
275 # the tags module always uses UTF-8 (in order not to lose info
276 # writing to the cache), but the rest of Mercurial wants them in
276 # writing to the cache), but the rest of Mercurial wants them in
277 # local encoding.
277 # local encoding.
278 tags = {}
278 tags = {}
279 for (name, (node, hist)) in alltags.iteritems():
279 for (name, (node, hist)) in alltags.iteritems():
280 if node != nullid:
280 if node != nullid:
281 tags[encoding.tolocal(name)] = node
281 tags[encoding.tolocal(name)] = node
282 tags['tip'] = self.changelog.tip()
282 tags['tip'] = self.changelog.tip()
283 tagtypes = dict([(encoding.tolocal(name), value)
283 tagtypes = dict([(encoding.tolocal(name), value)
284 for (name, value) in tagtypes.iteritems()])
284 for (name, value) in tagtypes.iteritems()])
285 return (tags, tagtypes)
285 return (tags, tagtypes)
286
286
287 def tagtype(self, tagname):
287 def tagtype(self, tagname):
288 '''
288 '''
289 return the type of the given tag. result can be:
289 return the type of the given tag. result can be:
290
290
291 'local' : a local tag
291 'local' : a local tag
292 'global' : a global tag
292 'global' : a global tag
293 None : tag does not exist
293 None : tag does not exist
294 '''
294 '''
295
295
296 self.tags()
296 self.tags()
297
297
298 return self._tagtypes.get(tagname)
298 return self._tagtypes.get(tagname)
299
299
300 def tagslist(self):
300 def tagslist(self):
301 '''return a list of tags ordered by revision'''
301 '''return a list of tags ordered by revision'''
302 l = []
302 l = []
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 try:
304 try:
305 r = self.changelog.rev(n)
305 r = self.changelog.rev(n)
306 except:
306 except:
307 r = -2 # sort to the beginning of the list if unknown
307 r = -2 # sort to the beginning of the list if unknown
308 l.append((r, t, n))
308 l.append((r, t, n))
309 return [(t, n) for r, t, n in sorted(l)]
309 return [(t, n) for r, t, n in sorted(l)]
310
310
311 def nodetags(self, node):
311 def nodetags(self, node):
312 '''return the tags associated with a node'''
312 '''return the tags associated with a node'''
313 if not self.nodetagscache:
313 if not self.nodetagscache:
314 self.nodetagscache = {}
314 self.nodetagscache = {}
315 for t, n in self.tags().iteritems():
315 for t, n in self.tags().iteritems():
316 self.nodetagscache.setdefault(n, []).append(t)
316 self.nodetagscache.setdefault(n, []).append(t)
317 return self.nodetagscache.get(node, [])
317 return self.nodetagscache.get(node, [])
318
318
319 def _branchtags(self, partial, lrev):
319 def _branchtags(self, partial, lrev):
320 # TODO: rename this function?
320 # TODO: rename this function?
321 tiprev = len(self) - 1
321 tiprev = len(self) - 1
322 if lrev != tiprev:
322 if lrev != tiprev:
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
325
325
326 return partial
326 return partial
327
327
328 def branchmap(self):
328 def branchmap(self):
329 '''returns a dictionary {branch: [branchheads]}'''
329 '''returns a dictionary {branch: [branchheads]}'''
330 tip = self.changelog.tip()
330 tip = self.changelog.tip()
331 if self._branchcache is not None and self._branchcachetip == tip:
331 if self._branchcache is not None and self._branchcachetip == tip:
332 return self._branchcache
332 return self._branchcache
333
333
334 oldtip = self._branchcachetip
334 oldtip = self._branchcachetip
335 self._branchcachetip = tip
335 self._branchcachetip = tip
336 if oldtip is None or oldtip not in self.changelog.nodemap:
336 if oldtip is None or oldtip not in self.changelog.nodemap:
337 partial, last, lrev = self._readbranchcache()
337 partial, last, lrev = self._readbranchcache()
338 else:
338 else:
339 lrev = self.changelog.rev(oldtip)
339 lrev = self.changelog.rev(oldtip)
340 partial = self._branchcache
340 partial = self._branchcache
341
341
342 self._branchtags(partial, lrev)
342 self._branchtags(partial, lrev)
343 # this private cache holds all heads (not just tips)
343 # this private cache holds all heads (not just tips)
344 self._branchcache = partial
344 self._branchcache = partial
345
345
346 return self._branchcache
346 return self._branchcache
347
347
348 def branchtags(self):
348 def branchtags(self):
349 '''return a dict where branch names map to the tipmost head of
349 '''return a dict where branch names map to the tipmost head of
350 the branch, open heads come before closed'''
350 the branch, open heads come before closed'''
351 bt = {}
351 bt = {}
352 for bn, heads in self.branchmap().iteritems():
352 for bn, heads in self.branchmap().iteritems():
353 head = None
353 tip = heads[-1]
354 for i in range(len(heads)-1, -1, -1):
354 for h in reversed(heads):
355 h = heads[i]
356 if 'close' not in self.changelog.read(h)[5]:
355 if 'close' not in self.changelog.read(h)[5]:
357 head = h
356 tip = h
358 break
357 break
359 # no open heads were found
358 bt[bn] = tip
360 if head is None:
361 head = heads[-1]
362 bt[bn] = head
363 return bt
359 return bt
364
360
365
361
366 def _readbranchcache(self):
362 def _readbranchcache(self):
367 partial = {}
363 partial = {}
368 try:
364 try:
369 f = self.opener("branchheads.cache")
365 f = self.opener("branchheads.cache")
370 lines = f.read().split('\n')
366 lines = f.read().split('\n')
371 f.close()
367 f.close()
372 except (IOError, OSError):
368 except (IOError, OSError):
373 return {}, nullid, nullrev
369 return {}, nullid, nullrev
374
370
375 try:
371 try:
376 last, lrev = lines.pop(0).split(" ", 1)
372 last, lrev = lines.pop(0).split(" ", 1)
377 last, lrev = bin(last), int(lrev)
373 last, lrev = bin(last), int(lrev)
378 if lrev >= len(self) or self[lrev].node() != last:
374 if lrev >= len(self) or self[lrev].node() != last:
379 # invalidate the cache
375 # invalidate the cache
380 raise ValueError('invalidating branch cache (tip differs)')
376 raise ValueError('invalidating branch cache (tip differs)')
381 for l in lines:
377 for l in lines:
382 if not l:
378 if not l:
383 continue
379 continue
384 node, label = l.split(" ", 1)
380 node, label = l.split(" ", 1)
385 partial.setdefault(label.strip(), []).append(bin(node))
381 partial.setdefault(label.strip(), []).append(bin(node))
386 except KeyboardInterrupt:
382 except KeyboardInterrupt:
387 raise
383 raise
388 except Exception, inst:
384 except Exception, inst:
389 if self.ui.debugflag:
385 if self.ui.debugflag:
390 self.ui.warn(str(inst), '\n')
386 self.ui.warn(str(inst), '\n')
391 partial, last, lrev = {}, nullid, nullrev
387 partial, last, lrev = {}, nullid, nullrev
392 return partial, last, lrev
388 return partial, last, lrev
393
389
394 def _writebranchcache(self, branches, tip, tiprev):
390 def _writebranchcache(self, branches, tip, tiprev):
395 try:
391 try:
396 f = self.opener("branchheads.cache", "w", atomictemp=True)
392 f = self.opener("branchheads.cache", "w", atomictemp=True)
397 f.write("%s %s\n" % (hex(tip), tiprev))
393 f.write("%s %s\n" % (hex(tip), tiprev))
398 for label, nodes in branches.iteritems():
394 for label, nodes in branches.iteritems():
399 for node in nodes:
395 for node in nodes:
400 f.write("%s %s\n" % (hex(node), label))
396 f.write("%s %s\n" % (hex(node), label))
401 f.rename()
397 f.rename()
402 except (IOError, OSError):
398 except (IOError, OSError):
403 pass
399 pass
404
400
405 def _updatebranchcache(self, partial, start, end):
401 def _updatebranchcache(self, partial, start, end):
406 # collect new branch entries
402 # collect new branch entries
407 newbranches = {}
403 newbranches = {}
408 for r in xrange(start, end):
404 for r in xrange(start, end):
409 c = self[r]
405 c = self[r]
410 newbranches.setdefault(c.branch(), []).append(c.node())
406 newbranches.setdefault(c.branch(), []).append(c.node())
411 # if older branchheads are reachable from new ones, they aren't
407 # if older branchheads are reachable from new ones, they aren't
412 # really branchheads. Note checking parents is insufficient:
408 # really branchheads. Note checking parents is insufficient:
413 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
414 for branch, newnodes in newbranches.iteritems():
410 for branch, newnodes in newbranches.iteritems():
415 bheads = partial.setdefault(branch, [])
411 bheads = partial.setdefault(branch, [])
416 bheads.extend(newnodes)
412 bheads.extend(newnodes)
417 if len(bheads) < 2:
413 if len(bheads) < 2:
418 continue
414 continue
419 newbheads = []
415 newbheads = []
420 # starting from tip means fewer passes over reachable
416 # starting from tip means fewer passes over reachable
421 while newnodes:
417 while newnodes:
422 latest = newnodes.pop()
418 latest = newnodes.pop()
423 if latest not in bheads:
419 if latest not in bheads:
424 continue
420 continue
425 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
426 reachable = self.changelog.reachable(latest, minbhrev)
422 reachable = self.changelog.reachable(latest, minbhrev)
427 bheads = [b for b in bheads if b not in reachable]
423 bheads = [b for b in bheads if b not in reachable]
428 newbheads.insert(0, latest)
424 newbheads.insert(0, latest)
429 bheads.extend(newbheads)
425 bheads.extend(newbheads)
430 partial[branch] = bheads
426 partial[branch] = bheads
431
427
432 def lookup(self, key):
428 def lookup(self, key):
433 if isinstance(key, int):
429 if isinstance(key, int):
434 return self.changelog.node(key)
430 return self.changelog.node(key)
435 elif key == '.':
431 elif key == '.':
436 return self.dirstate.parents()[0]
432 return self.dirstate.parents()[0]
437 elif key == 'null':
433 elif key == 'null':
438 return nullid
434 return nullid
439 elif key == 'tip':
435 elif key == 'tip':
440 return self.changelog.tip()
436 return self.changelog.tip()
441 n = self.changelog._match(key)
437 n = self.changelog._match(key)
442 if n:
438 if n:
443 return n
439 return n
444 if key in self.tags():
440 if key in self.tags():
445 return self.tags()[key]
441 return self.tags()[key]
446 if key in self.branchtags():
442 if key in self.branchtags():
447 return self.branchtags()[key]
443 return self.branchtags()[key]
448 n = self.changelog._partialmatch(key)
444 n = self.changelog._partialmatch(key)
449 if n:
445 if n:
450 return n
446 return n
451
447
452 # can't find key, check if it might have come from damaged dirstate
448 # can't find key, check if it might have come from damaged dirstate
453 if key in self.dirstate.parents():
449 if key in self.dirstate.parents():
454 raise error.Abort(_("working directory has unknown parent '%s'!")
450 raise error.Abort(_("working directory has unknown parent '%s'!")
455 % short(key))
451 % short(key))
456 try:
452 try:
457 if len(key) == 20:
453 if len(key) == 20:
458 key = hex(key)
454 key = hex(key)
459 except:
455 except:
460 pass
456 pass
461 raise error.RepoLookupError(_("unknown revision '%s'") % key)
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
462
458
463 def local(self):
459 def local(self):
464 return True
460 return True
465
461
466 def join(self, f):
462 def join(self, f):
467 return os.path.join(self.path, f)
463 return os.path.join(self.path, f)
468
464
469 def wjoin(self, f):
465 def wjoin(self, f):
470 return os.path.join(self.root, f)
466 return os.path.join(self.root, f)
471
467
472 def rjoin(self, f):
468 def rjoin(self, f):
473 return os.path.join(self.root, util.pconvert(f))
469 return os.path.join(self.root, util.pconvert(f))
474
470
475 def file(self, f):
471 def file(self, f):
476 if f[0] == '/':
472 if f[0] == '/':
477 f = f[1:]
473 f = f[1:]
478 return filelog.filelog(self.sopener, f)
474 return filelog.filelog(self.sopener, f)
479
475
480 def changectx(self, changeid):
476 def changectx(self, changeid):
481 return self[changeid]
477 return self[changeid]
482
478
483 def parents(self, changeid=None):
479 def parents(self, changeid=None):
484 '''get list of changectxs for parents of changeid'''
480 '''get list of changectxs for parents of changeid'''
485 return self[changeid].parents()
481 return self[changeid].parents()
486
482
487 def filectx(self, path, changeid=None, fileid=None):
483 def filectx(self, path, changeid=None, fileid=None):
488 """changeid can be a changeset revision, node, or tag.
484 """changeid can be a changeset revision, node, or tag.
489 fileid can be a file revision or node."""
485 fileid can be a file revision or node."""
490 return context.filectx(self, path, changeid, fileid)
486 return context.filectx(self, path, changeid, fileid)
491
487
492 def getcwd(self):
488 def getcwd(self):
493 return self.dirstate.getcwd()
489 return self.dirstate.getcwd()
494
490
495 def pathto(self, f, cwd=None):
491 def pathto(self, f, cwd=None):
496 return self.dirstate.pathto(f, cwd)
492 return self.dirstate.pathto(f, cwd)
497
493
498 def wfile(self, f, mode='r'):
494 def wfile(self, f, mode='r'):
499 return self.wopener(f, mode)
495 return self.wopener(f, mode)
500
496
501 def _link(self, f):
497 def _link(self, f):
502 return os.path.islink(self.wjoin(f))
498 return os.path.islink(self.wjoin(f))
503
499
504 def _filter(self, filter, filename, data):
500 def _filter(self, filter, filename, data):
505 if filter not in self.filterpats:
501 if filter not in self.filterpats:
506 l = []
502 l = []
507 for pat, cmd in self.ui.configitems(filter):
503 for pat, cmd in self.ui.configitems(filter):
508 if cmd == '!':
504 if cmd == '!':
509 continue
505 continue
510 mf = match_.match(self.root, '', [pat])
506 mf = match_.match(self.root, '', [pat])
511 fn = None
507 fn = None
512 params = cmd
508 params = cmd
513 for name, filterfn in self._datafilters.iteritems():
509 for name, filterfn in self._datafilters.iteritems():
514 if cmd.startswith(name):
510 if cmd.startswith(name):
515 fn = filterfn
511 fn = filterfn
516 params = cmd[len(name):].lstrip()
512 params = cmd[len(name):].lstrip()
517 break
513 break
518 if not fn:
514 if not fn:
519 fn = lambda s, c, **kwargs: util.filter(s, c)
515 fn = lambda s, c, **kwargs: util.filter(s, c)
520 # Wrap old filters not supporting keyword arguments
516 # Wrap old filters not supporting keyword arguments
521 if not inspect.getargspec(fn)[2]:
517 if not inspect.getargspec(fn)[2]:
522 oldfn = fn
518 oldfn = fn
523 fn = lambda s, c, **kwargs: oldfn(s, c)
519 fn = lambda s, c, **kwargs: oldfn(s, c)
524 l.append((mf, fn, params))
520 l.append((mf, fn, params))
525 self.filterpats[filter] = l
521 self.filterpats[filter] = l
526
522
527 for mf, fn, cmd in self.filterpats[filter]:
523 for mf, fn, cmd in self.filterpats[filter]:
528 if mf(filename):
524 if mf(filename):
529 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
530 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
531 break
527 break
532
528
533 return data
529 return data
534
530
535 def adddatafilter(self, name, filter):
531 def adddatafilter(self, name, filter):
536 self._datafilters[name] = filter
532 self._datafilters[name] = filter
537
533
538 def wread(self, filename):
534 def wread(self, filename):
539 if self._link(filename):
535 if self._link(filename):
540 data = os.readlink(self.wjoin(filename))
536 data = os.readlink(self.wjoin(filename))
541 else:
537 else:
542 data = self.wopener(filename, 'r').read()
538 data = self.wopener(filename, 'r').read()
543 return self._filter("encode", filename, data)
539 return self._filter("encode", filename, data)
544
540
545 def wwrite(self, filename, data, flags):
541 def wwrite(self, filename, data, flags):
546 data = self._filter("decode", filename, data)
542 data = self._filter("decode", filename, data)
547 try:
543 try:
548 os.unlink(self.wjoin(filename))
544 os.unlink(self.wjoin(filename))
549 except OSError:
545 except OSError:
550 pass
546 pass
551 if 'l' in flags:
547 if 'l' in flags:
552 self.wopener.symlink(data, filename)
548 self.wopener.symlink(data, filename)
553 else:
549 else:
554 self.wopener(filename, 'w').write(data)
550 self.wopener(filename, 'w').write(data)
555 if 'x' in flags:
551 if 'x' in flags:
556 util.set_flags(self.wjoin(filename), False, True)
552 util.set_flags(self.wjoin(filename), False, True)
557
553
558 def wwritedata(self, filename, data):
554 def wwritedata(self, filename, data):
559 return self._filter("decode", filename, data)
555 return self._filter("decode", filename, data)
560
556
561 def transaction(self):
557 def transaction(self):
562 tr = self._transref and self._transref() or None
558 tr = self._transref and self._transref() or None
563 if tr and tr.running():
559 if tr and tr.running():
564 return tr.nest()
560 return tr.nest()
565
561
566 # abort here if the journal already exists
562 # abort here if the journal already exists
567 if os.path.exists(self.sjoin("journal")):
563 if os.path.exists(self.sjoin("journal")):
568 raise error.RepoError(
564 raise error.RepoError(
569 _("abandoned transaction found - run hg recover"))
565 _("abandoned transaction found - run hg recover"))
570
566
571 # save dirstate for rollback
567 # save dirstate for rollback
572 try:
568 try:
573 ds = self.opener("dirstate").read()
569 ds = self.opener("dirstate").read()
574 except IOError:
570 except IOError:
575 ds = ""
571 ds = ""
576 self.opener("journal.dirstate", "w").write(ds)
572 self.opener("journal.dirstate", "w").write(ds)
577 self.opener("journal.branch", "w").write(self.dirstate.branch())
573 self.opener("journal.branch", "w").write(self.dirstate.branch())
578
574
579 renames = [(self.sjoin("journal"), self.sjoin("undo")),
575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
580 (self.join("journal.dirstate"), self.join("undo.dirstate")),
576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
581 (self.join("journal.branch"), self.join("undo.branch"))]
577 (self.join("journal.branch"), self.join("undo.branch"))]
582 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
583 self.sjoin("journal"),
579 self.sjoin("journal"),
584 aftertrans(renames),
580 aftertrans(renames),
585 self.store.createmode)
581 self.store.createmode)
586 self._transref = weakref.ref(tr)
582 self._transref = weakref.ref(tr)
587 return tr
583 return tr
588
584
589 def recover(self):
585 def recover(self):
590 lock = self.lock()
586 lock = self.lock()
591 try:
587 try:
592 if os.path.exists(self.sjoin("journal")):
588 if os.path.exists(self.sjoin("journal")):
593 self.ui.status(_("rolling back interrupted transaction\n"))
589 self.ui.status(_("rolling back interrupted transaction\n"))
594 transaction.rollback(self.sopener, self.sjoin("journal"),
590 transaction.rollback(self.sopener, self.sjoin("journal"),
595 self.ui.warn)
591 self.ui.warn)
596 self.invalidate()
592 self.invalidate()
597 return True
593 return True
598 else:
594 else:
599 self.ui.warn(_("no interrupted transaction available\n"))
595 self.ui.warn(_("no interrupted transaction available\n"))
600 return False
596 return False
601 finally:
597 finally:
602 lock.release()
598 lock.release()
603
599
604 def rollback(self):
600 def rollback(self):
605 wlock = lock = None
601 wlock = lock = None
606 try:
602 try:
607 wlock = self.wlock()
603 wlock = self.wlock()
608 lock = self.lock()
604 lock = self.lock()
609 if os.path.exists(self.sjoin("undo")):
605 if os.path.exists(self.sjoin("undo")):
610 self.ui.status(_("rolling back last transaction\n"))
606 self.ui.status(_("rolling back last transaction\n"))
611 transaction.rollback(self.sopener, self.sjoin("undo"),
607 transaction.rollback(self.sopener, self.sjoin("undo"),
612 self.ui.warn)
608 self.ui.warn)
613 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
614 try:
610 try:
615 branch = self.opener("undo.branch").read()
611 branch = self.opener("undo.branch").read()
616 self.dirstate.setbranch(branch)
612 self.dirstate.setbranch(branch)
617 except IOError:
613 except IOError:
618 self.ui.warn(_("Named branch could not be reset, "
614 self.ui.warn(_("Named branch could not be reset, "
619 "current branch still is: %s\n")
615 "current branch still is: %s\n")
620 % encoding.tolocal(self.dirstate.branch()))
616 % encoding.tolocal(self.dirstate.branch()))
621 self.invalidate()
617 self.invalidate()
622 self.dirstate.invalidate()
618 self.dirstate.invalidate()
623 self.destroyed()
619 self.destroyed()
624 else:
620 else:
625 self.ui.warn(_("no rollback information available\n"))
621 self.ui.warn(_("no rollback information available\n"))
626 finally:
622 finally:
627 release(lock, wlock)
623 release(lock, wlock)
628
624
629 def invalidate(self):
625 def invalidate(self):
630 for a in "changelog manifest".split():
626 for a in "changelog manifest".split():
631 if a in self.__dict__:
627 if a in self.__dict__:
632 delattr(self, a)
628 delattr(self, a)
633 self._tags = None
629 self._tags = None
634 self._tagtypes = None
630 self._tagtypes = None
635 self.nodetagscache = None
631 self.nodetagscache = None
636 self._branchcache = None # in UTF-8
632 self._branchcache = None # in UTF-8
637 self._branchcachetip = None
633 self._branchcachetip = None
638
634
639 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
635 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
640 try:
636 try:
641 l = lock.lock(lockname, 0, releasefn, desc=desc)
637 l = lock.lock(lockname, 0, releasefn, desc=desc)
642 except error.LockHeld, inst:
638 except error.LockHeld, inst:
643 if not wait:
639 if not wait:
644 raise
640 raise
645 self.ui.warn(_("waiting for lock on %s held by %r\n") %
641 self.ui.warn(_("waiting for lock on %s held by %r\n") %
646 (desc, inst.locker))
642 (desc, inst.locker))
647 # default to 600 seconds timeout
643 # default to 600 seconds timeout
648 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
644 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
649 releasefn, desc=desc)
645 releasefn, desc=desc)
650 if acquirefn:
646 if acquirefn:
651 acquirefn()
647 acquirefn()
652 return l
648 return l
653
649
654 def lock(self, wait=True):
650 def lock(self, wait=True):
655 '''Lock the repository store (.hg/store) and return a weak reference
651 '''Lock the repository store (.hg/store) and return a weak reference
656 to the lock. Use this before modifying the store (e.g. committing or
652 to the lock. Use this before modifying the store (e.g. committing or
657 stripping). If you are opening a transaction, get a lock as well.)'''
653 stripping). If you are opening a transaction, get a lock as well.)'''
658 l = self._lockref and self._lockref()
654 l = self._lockref and self._lockref()
659 if l is not None and l.held:
655 if l is not None and l.held:
660 l.lock()
656 l.lock()
661 return l
657 return l
662
658
663 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
659 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
664 _('repository %s') % self.origroot)
660 _('repository %s') % self.origroot)
665 self._lockref = weakref.ref(l)
661 self._lockref = weakref.ref(l)
666 return l
662 return l
667
663
668 def wlock(self, wait=True):
664 def wlock(self, wait=True):
669 '''Lock the non-store parts of the repository (everything under
665 '''Lock the non-store parts of the repository (everything under
670 .hg except .hg/store) and return a weak reference to the lock.
666 .hg except .hg/store) and return a weak reference to the lock.
671 Use this before modifying files in .hg.'''
667 Use this before modifying files in .hg.'''
672 l = self._wlockref and self._wlockref()
668 l = self._wlockref and self._wlockref()
673 if l is not None and l.held:
669 if l is not None and l.held:
674 l.lock()
670 l.lock()
675 return l
671 return l
676
672
677 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
673 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
678 self.dirstate.invalidate, _('working directory of %s') %
674 self.dirstate.invalidate, _('working directory of %s') %
679 self.origroot)
675 self.origroot)
680 self._wlockref = weakref.ref(l)
676 self._wlockref = weakref.ref(l)
681 return l
677 return l
682
678
683 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
679 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
684 """
680 """
685 commit an individual file as part of a larger transaction
681 commit an individual file as part of a larger transaction
686 """
682 """
687
683
688 fname = fctx.path()
684 fname = fctx.path()
689 text = fctx.data()
685 text = fctx.data()
690 flog = self.file(fname)
686 flog = self.file(fname)
691 fparent1 = manifest1.get(fname, nullid)
687 fparent1 = manifest1.get(fname, nullid)
692 fparent2 = fparent2o = manifest2.get(fname, nullid)
688 fparent2 = fparent2o = manifest2.get(fname, nullid)
693
689
694 meta = {}
690 meta = {}
695 copy = fctx.renamed()
691 copy = fctx.renamed()
696 if copy and copy[0] != fname:
692 if copy and copy[0] != fname:
697 # Mark the new revision of this file as a copy of another
693 # Mark the new revision of this file as a copy of another
698 # file. This copy data will effectively act as a parent
694 # file. This copy data will effectively act as a parent
699 # of this new revision. If this is a merge, the first
695 # of this new revision. If this is a merge, the first
700 # parent will be the nullid (meaning "look up the copy data")
696 # parent will be the nullid (meaning "look up the copy data")
701 # and the second one will be the other parent. For example:
697 # and the second one will be the other parent. For example:
702 #
698 #
703 # 0 --- 1 --- 3 rev1 changes file foo
699 # 0 --- 1 --- 3 rev1 changes file foo
704 # \ / rev2 renames foo to bar and changes it
700 # \ / rev2 renames foo to bar and changes it
705 # \- 2 -/ rev3 should have bar with all changes and
701 # \- 2 -/ rev3 should have bar with all changes and
706 # should record that bar descends from
702 # should record that bar descends from
707 # bar in rev2 and foo in rev1
703 # bar in rev2 and foo in rev1
708 #
704 #
709 # this allows this merge to succeed:
705 # this allows this merge to succeed:
710 #
706 #
711 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
707 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
712 # \ / merging rev3 and rev4 should use bar@rev2
708 # \ / merging rev3 and rev4 should use bar@rev2
713 # \- 2 --- 4 as the merge base
709 # \- 2 --- 4 as the merge base
714 #
710 #
715
711
716 cfname = copy[0]
712 cfname = copy[0]
717 crev = manifest1.get(cfname)
713 crev = manifest1.get(cfname)
718 newfparent = fparent2
714 newfparent = fparent2
719
715
720 if manifest2: # branch merge
716 if manifest2: # branch merge
721 if fparent2 == nullid or crev is None: # copied on remote side
717 if fparent2 == nullid or crev is None: # copied on remote side
722 if cfname in manifest2:
718 if cfname in manifest2:
723 crev = manifest2[cfname]
719 crev = manifest2[cfname]
724 newfparent = fparent1
720 newfparent = fparent1
725
721
726 # find source in nearest ancestor if we've lost track
722 # find source in nearest ancestor if we've lost track
727 if not crev:
723 if not crev:
728 self.ui.debug(" %s: searching for copy revision for %s\n" %
724 self.ui.debug(" %s: searching for copy revision for %s\n" %
729 (fname, cfname))
725 (fname, cfname))
730 for ancestor in self['.'].ancestors():
726 for ancestor in self['.'].ancestors():
731 if cfname in ancestor:
727 if cfname in ancestor:
732 crev = ancestor[cfname].filenode()
728 crev = ancestor[cfname].filenode()
733 break
729 break
734
730
735 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
731 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
736 meta["copy"] = cfname
732 meta["copy"] = cfname
737 meta["copyrev"] = hex(crev)
733 meta["copyrev"] = hex(crev)
738 fparent1, fparent2 = nullid, newfparent
734 fparent1, fparent2 = nullid, newfparent
739 elif fparent2 != nullid:
735 elif fparent2 != nullid:
740 # is one parent an ancestor of the other?
736 # is one parent an ancestor of the other?
741 fparentancestor = flog.ancestor(fparent1, fparent2)
737 fparentancestor = flog.ancestor(fparent1, fparent2)
742 if fparentancestor == fparent1:
738 if fparentancestor == fparent1:
743 fparent1, fparent2 = fparent2, nullid
739 fparent1, fparent2 = fparent2, nullid
744 elif fparentancestor == fparent2:
740 elif fparentancestor == fparent2:
745 fparent2 = nullid
741 fparent2 = nullid
746
742
747 # is the file changed?
743 # is the file changed?
748 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
744 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
749 changelist.append(fname)
745 changelist.append(fname)
750 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
746 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
751
747
752 # are just the flags changed during merge?
748 # are just the flags changed during merge?
753 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
749 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
754 changelist.append(fname)
750 changelist.append(fname)
755
751
756 return fparent1
752 return fparent1
757
753
758 def commit(self, text="", user=None, date=None, match=None, force=False,
754 def commit(self, text="", user=None, date=None, match=None, force=False,
759 editor=False, extra={}):
755 editor=False, extra={}):
760 """Add a new revision to current repository.
756 """Add a new revision to current repository.
761
757
762 Revision information is gathered from the working directory,
758 Revision information is gathered from the working directory,
763 match can be used to filter the committed files. If editor is
759 match can be used to filter the committed files. If editor is
764 supplied, it is called to get a commit message.
760 supplied, it is called to get a commit message.
765 """
761 """
766
762
767 def fail(f, msg):
763 def fail(f, msg):
768 raise util.Abort('%s: %s' % (f, msg))
764 raise util.Abort('%s: %s' % (f, msg))
769
765
770 if not match:
766 if not match:
771 match = match_.always(self.root, '')
767 match = match_.always(self.root, '')
772
768
773 if not force:
769 if not force:
774 vdirs = []
770 vdirs = []
775 match.dir = vdirs.append
771 match.dir = vdirs.append
776 match.bad = fail
772 match.bad = fail
777
773
778 wlock = self.wlock()
774 wlock = self.wlock()
779 try:
775 try:
780 p1, p2 = self.dirstate.parents()
776 p1, p2 = self.dirstate.parents()
781 wctx = self[None]
777 wctx = self[None]
782
778
783 if (not force and p2 != nullid and match and
779 if (not force and p2 != nullid and match and
784 (match.files() or match.anypats())):
780 (match.files() or match.anypats())):
785 raise util.Abort(_('cannot partially commit a merge '
781 raise util.Abort(_('cannot partially commit a merge '
786 '(do not specify files or patterns)'))
782 '(do not specify files or patterns)'))
787
783
788 changes = self.status(match=match, clean=force)
784 changes = self.status(match=match, clean=force)
789 if force:
785 if force:
790 changes[0].extend(changes[6]) # mq may commit unchanged files
786 changes[0].extend(changes[6]) # mq may commit unchanged files
791
787
792 # check subrepos
788 # check subrepos
793 subs = []
789 subs = []
794 for s in wctx.substate:
790 for s in wctx.substate:
795 if match(s) and wctx.sub(s).dirty():
791 if match(s) and wctx.sub(s).dirty():
796 subs.append(s)
792 subs.append(s)
797 if subs and '.hgsubstate' not in changes[0]:
793 if subs and '.hgsubstate' not in changes[0]:
798 changes[0].insert(0, '.hgsubstate')
794 changes[0].insert(0, '.hgsubstate')
799
795
800 # make sure all explicit patterns are matched
796 # make sure all explicit patterns are matched
801 if not force and match.files():
797 if not force and match.files():
802 matched = set(changes[0] + changes[1] + changes[2])
798 matched = set(changes[0] + changes[1] + changes[2])
803
799
804 for f in match.files():
800 for f in match.files():
805 if f == '.' or f in matched or f in wctx.substate:
801 if f == '.' or f in matched or f in wctx.substate:
806 continue
802 continue
807 if f in changes[3]: # missing
803 if f in changes[3]: # missing
808 fail(f, _('file not found!'))
804 fail(f, _('file not found!'))
809 if f in vdirs: # visited directory
805 if f in vdirs: # visited directory
810 d = f + '/'
806 d = f + '/'
811 for mf in matched:
807 for mf in matched:
812 if mf.startswith(d):
808 if mf.startswith(d):
813 break
809 break
814 else:
810 else:
815 fail(f, _("no match under directory!"))
811 fail(f, _("no match under directory!"))
816 elif f not in self.dirstate:
812 elif f not in self.dirstate:
817 fail(f, _("file not tracked!"))
813 fail(f, _("file not tracked!"))
818
814
819 if (not force and not extra.get("close") and p2 == nullid
815 if (not force and not extra.get("close") and p2 == nullid
820 and not (changes[0] or changes[1] or changes[2])
816 and not (changes[0] or changes[1] or changes[2])
821 and self[None].branch() == self['.'].branch()):
817 and self[None].branch() == self['.'].branch()):
822 return None
818 return None
823
819
824 ms = merge_.mergestate(self)
820 ms = merge_.mergestate(self)
825 for f in changes[0]:
821 for f in changes[0]:
826 if f in ms and ms[f] == 'u':
822 if f in ms and ms[f] == 'u':
827 raise util.Abort(_("unresolved merge conflicts "
823 raise util.Abort(_("unresolved merge conflicts "
828 "(see hg resolve)"))
824 "(see hg resolve)"))
829
825
830 cctx = context.workingctx(self, (p1, p2), text, user, date,
826 cctx = context.workingctx(self, (p1, p2), text, user, date,
831 extra, changes)
827 extra, changes)
832 if editor:
828 if editor:
833 cctx._text = editor(self, cctx, subs)
829 cctx._text = editor(self, cctx, subs)
834 edited = (text != cctx._text)
830 edited = (text != cctx._text)
835
831
836 # commit subs
832 # commit subs
837 if subs:
833 if subs:
838 state = wctx.substate.copy()
834 state = wctx.substate.copy()
839 for s in subs:
835 for s in subs:
840 self.ui.status(_('committing subrepository %s\n') % s)
836 self.ui.status(_('committing subrepository %s\n') % s)
841 sr = wctx.sub(s).commit(cctx._text, user, date)
837 sr = wctx.sub(s).commit(cctx._text, user, date)
842 state[s] = (state[s][0], sr)
838 state[s] = (state[s][0], sr)
843 subrepo.writestate(self, state)
839 subrepo.writestate(self, state)
844
840
845 # Save commit message in case this transaction gets rolled back
841 # Save commit message in case this transaction gets rolled back
846 # (e.g. by a pretxncommit hook). Leave the content alone on
842 # (e.g. by a pretxncommit hook). Leave the content alone on
847 # the assumption that the user will use the same editor again.
843 # the assumption that the user will use the same editor again.
848 msgfile = self.opener('last-message.txt', 'wb')
844 msgfile = self.opener('last-message.txt', 'wb')
849 msgfile.write(cctx._text)
845 msgfile.write(cctx._text)
850 msgfile.close()
846 msgfile.close()
851
847
852 try:
848 try:
853 ret = self.commitctx(cctx, True)
849 ret = self.commitctx(cctx, True)
854 except:
850 except:
855 if edited:
851 if edited:
856 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
852 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
857 self.ui.write(
853 self.ui.write(
858 _('note: commit message saved in %s\n') % msgfn)
854 _('note: commit message saved in %s\n') % msgfn)
859 raise
855 raise
860
856
861 # update dirstate and mergestate
857 # update dirstate and mergestate
862 for f in changes[0] + changes[1]:
858 for f in changes[0] + changes[1]:
863 self.dirstate.normal(f)
859 self.dirstate.normal(f)
864 for f in changes[2]:
860 for f in changes[2]:
865 self.dirstate.forget(f)
861 self.dirstate.forget(f)
866 self.dirstate.setparents(ret)
862 self.dirstate.setparents(ret)
867 ms.reset()
863 ms.reset()
868
864
869 return ret
865 return ret
870
866
871 finally:
867 finally:
872 wlock.release()
868 wlock.release()
873
869
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

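    # A rough sketch of the ordering commitctx() relies on: 'linkrev =
    # len(self)' is the revision number the new changeset will receive,
    # because revlogs are append-only.  Every filelog and manifest entry
    # written before changelog.add() records that future number, so file
    # and manifest revisions can be traced back to their changeset:
    #
    #     linkrev = len(repo)                       # say, 42
    #     new[f] = repo._filecommit(...)            # file revision links to 42
    #     mn = repo.manifest.add(m1, trp, linkrev, ...)  # manifest links to 42
    #     n = repo.changelog.add(mn, ...)           # n becomes rev 42
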
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

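    # A minimal standalone sketch of the manifest comparison loop above,
    # treating manifests as plain {filename: nodeid} dicts and ignoring
    # flags and the working-directory pseudo-manifest (hypothetical helper,
    # for illustration only):
    #
    #     def diffmanifests(mf1, mf2):
    #         mf1 = dict(mf1)
    #         modified, added = [], []
    #         for fn in mf2:
    #             if fn in mf1:
    #                 if mf1[fn] != mf2[fn]:
    #                     modified.append(fn)
    #                 del mf1[fn]
    #             else:
    #                 added.append(fn)
    #         removed = sorted(mf1)   # whatever only mf1 still mentions
    #         return modified, added, removed
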
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

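    # The single-letter dirstate states tested above: 'n' tracked/normal,
    # 'a' scheduled for addition, 'r' scheduled for removal, 'm' merged,
    # '?' untracked.  For example, adding a file currently in state 'r'
    # only needs normallookup() to cancel the pending removal, not a
    # fresh add().
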
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

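    # The (-rev, node) decoration above yields newest-first order from a
    # plain ascending sort; e.g. revs [3, 7, 5] decorate to
    # [(-3, a), (-7, b), (-5, c)], which sorts to b, c, a (revs 7, 5, 3).
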
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

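    # changelog.read(h)[5] above is the changeset's extra dict; heads that
    # closed their branch carry a 'close' key there, so they are filtered
    # out unless closed=True is passed.
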
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

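    # Each tuple appended above describes one linear segment of history:
    # the node that was asked about, the segment's base (the first merge
    # or root reached by following first parents), and that base's two
    # parents.
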
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

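    # A standalone sketch of the sampling scheme in between(), assuming a
    # hypothetical 'parents' callback returning a node's parent tuple:
    # nodes are kept at distances 1, 2, 4, 8, ... from top, which is what
    # lets the discovery code below binary-search a branch range.
    #
    #     def sample(top, bottom, parents):
    #         n, l, i, f = top, [], 0, 1
    #         while n != bottom and n != nullid:
    #             if i == f:          # distances 1, 2, 4, 8, ...
    #                 l.append(n)
    #                 f *= 2
    #             n = parents(n)[0]   # follow first parents only
    #             i += 1
    #         return l
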
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

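    # The narrowing loop above halves the candidate range each round: the
    # remote returns samples at distances 1, 2, 4, ... from the unknown
    # end of a branch segment, and the first sample we recognize locally
    # bounds the known/unknown boundary to the interval (p, i).  When
    # f <= 2 that gap is at most one node, so p itself must be the
    # earliest unknown changeset and goes straight into 'fetch';
    # otherwise (p, i) is rescheduled and the next round samples inside
    # the shrunken interval, giving O(log n) rounds per branch.
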
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

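    # A minimal standalone sketch of the pruning above, with the DAG as a
    # plain {node: (p1, p2)} dict (hypothetical, for illustration):
    # everything the remote has is removed by walking ancestors of the
    # common nodes, and the outgoing roots are the survivors whose
    # parents were both pruned.
    #
    #     def outgoing_roots(parents, common):
    #         remain = set(parents)
    #         queue = list(common)
    #         while queue:
    #             n = queue.pop(0)
    #             if n in remain:
    #                 remain.remove(n)
    #                 queue.extend(parents[n])
    #         return [n for n in remain
    #                 if not any(p in remain for p in parents[n])]
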
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    remotebrheads = remote.branchmap()

                    if not revs:
                        localbrheads = self.branchmap()
                    else:
                        localbrheads = {}
                        for n in heads:
                            branch = self[n].branch()
                            localbrheads.setdefault(branch, []).append(n)

                    newbranches = list(set(localbrheads) - set(remotebrheads))
                    if newbranches: # new branch requires --force
                        branchnames = ', '.join("%s" % b for b in newbranches)
                        self.ui.warn(_("abort: push creates "
                                       "new remote branches: %s!\n")
                                     % branchnames)
                        # propose 'push -b .' in the msg too?
                        self.ui.status(_("(use 'hg push -f' to force)\n"))
                        return None, 0
                    for branch, lheads in localbrheads.iteritems():
                        if branch in remotebrheads:
                            rheads = remotebrheads[branch]
                            if not checkbranch(lheads, rheads, update):
                                return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

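    # The branch check above reduces to counting prospective remote heads.
    # A compressed sketch with a hypothetical isancestor(a, b) predicate
    # ("a is an ancestor of b"), ignoring the partial-push subtleties that
    # checkbranch() handles:
    #
    #     def creates_new_heads(pushedheads, remoteheads, isancestor):
    #         after = set(pushedheads)
    #         for r in remoteheads:
    #             # a remote head survives unless a pushed head descends
    #             # from it
    #             if not any(isancestor(r, h) for h in pushedheads):
    #                 after.add(r)
    #         return len(after) > len(remoteheads)
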
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                msngset.pop(revlog.node(r), None)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode that
            # the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for chnk in group:
                yield chnk

            # Figure out which manifest nodes (of the ones we think might be
            # part of the changegroup) the recipient must know about and
            # remove them from the changegroup.
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
1847 raise util.Abort(_("empty or missing revlog for %s") % fname)
1852 # Toss out the filenodes that the recipient isn't really
1848 # Toss out the filenodes that the recipient isn't really
1853 # missing.
1849 # missing.
1854 if fname in msng_filenode_set:
1850 if fname in msng_filenode_set:
1855 prune_filenodes(fname, filerevlog)
1851 prune_filenodes(fname, filerevlog)
1856 add_extra_nodes(fname, msng_filenode_set[fname])
1852 add_extra_nodes(fname, msng_filenode_set[fname])
1857 msng_filenode_lst = msng_filenode_set[fname].keys()
1853 msng_filenode_lst = msng_filenode_set[fname].keys()
1858 else:
1854 else:
1859 msng_filenode_lst = []
1855 msng_filenode_lst = []
1860 # If any filenodes are left, generate the group for them,
1856 # If any filenodes are left, generate the group for them,
1861 # otherwise don't bother.
1857 # otherwise don't bother.
1862 if len(msng_filenode_lst) > 0:
1858 if len(msng_filenode_lst) > 0:
1863 yield changegroup.chunkheader(len(fname))
1859 yield changegroup.chunkheader(len(fname))
1864 yield fname
1860 yield fname
1865 # Sort the filenodes by their revision #
1861 # Sort the filenodes by their revision #
1866 msng_filenode_lst.sort(key=filerevlog.rev)
1862 msng_filenode_lst.sort(key=filerevlog.rev)
1867 # Create a group generator and only pass in a changenode
1863 # Create a group generator and only pass in a changenode
1868 # lookup function as we need to collect no information
1864 # lookup function as we need to collect no information
1869 # from filenodes.
1865 # from filenodes.
1870 group = filerevlog.group(msng_filenode_lst,
1866 group = filerevlog.group(msng_filenode_lst,
1871 lookup_filenode_link_func(fname))
1867 lookup_filenode_link_func(fname))
1872 for chnk in group:
1868 for chnk in group:
1873 yield chnk
1869 yield chnk
1874 if fname in msng_filenode_set:
1870 if fname in msng_filenode_set:
1875 # Don't need this anymore, toss it to free memory.
1871 # Don't need this anymore, toss it to free memory.
1876 del msng_filenode_set[fname]
1872 del msng_filenode_set[fname]
1877 # Signal that no more groups are left.
1873 # Signal that no more groups are left.
1878 yield changegroup.closechunk()
1874 yield changegroup.closechunk()
1879
1875
1880 if msng_cl_lst:
1876 if msng_cl_lst:
1881 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1877 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1882
1878
1883 return util.chunkbuffer(gengroup())
1879 return util.chunkbuffer(gengroup())
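
    # Illustrative sketch (hypothetical usage, not part of the original
    # module): the chunkbuffer returned by changegroupsubset() is consumed
    # like a file object; per _changegroup's docstring below, its read()
    # method returns successive changegroup chunks. 'bases', 'heads' and
    # 'send' are placeholders:
    #
    #   cg = repo.changegroupsubset(bases, heads, 'pull')
    #   while True:
    #       data = cg.read(4096)
    #       if not data:
    #           break
    #       send(data)          # stand-in for the transport's write call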

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = {}
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for chnk in cl.group(nodes, identity, collect):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
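
    # Illustrative sketch (hypothetical usage, not part of the original
    # module): lookuprevlink_func() above binds one revlog per closure, so
    # each group generator can map that revlog's nodes back to the changelog
    # node that introduced them without carrying extra state. The same
    # pattern in isolation, where 'filerevlog' and 'somenode' are
    # placeholders:
    #
    #   lookup = lookuprevlink_func(filerevlog)   # bind the revlog once
    #   linknode = lookup(somenode)               # owning changelog node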

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
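
    # Illustrative sketch (hypothetical caller, not part of the original
    # module): decoding addchangegroup()'s return value per the docstring
    # above, where 'cg' and 'url' are placeholders:
    #
    #   ret = repo.addchangegroup(cg, 'pull', url)
    #   if ret == 0:
    #       pass                    # nothing changed, or no source
    #   elif ret > 1:
    #       added = ret - 1         # that many heads were added
    #   elif ret < 0:
    #       removed = -ret - 1      # that many heads were removed
    #   else:                       # ret == 1
    #       pass                    # head count unchanged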

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
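
    # Illustrative sketch (reconstructed from the parsing code above, not a
    # protocol specification): stream_in() expects the remote's stream_out
    # data in roughly this shape:
    #
    #   <status>\n                     '0' ok, '1' forbidden, '2' lock failed
    #   <total_files> <total_bytes>\n
    #   then, for each file:
    #   <store path>\0<size>\n
    #   <size bytes of raw revlog data>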

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
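
    # Illustrative sketch (hypothetical calls, not part of the original
    # module): exercising the two clone paths, where 'remote' and 'somenode'
    # are placeholders:
    #
    #   repo.clone(remote, stream=True)       # streamed only if the server
    #                                         # advertises 'stream'
    #   repo.clone(remote, heads=[somenode])  # explicit heads force a pull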

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
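
# Illustrative sketch (hypothetical usage, not part of the original module):
# aftertrans() returns a plain closure rather than a bound method, so the
# transaction holding it keeps no reference back into the repository and
# destructors can still run. With placeholder journal paths:
#
#   onclose = aftertrans([('journal', 'undo')])
#   onclose()   # renames 'journal' to 'undo' via util.rename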

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True