localrepo: add destroyed() method for strip/rollback to use (issue548).
Greg Ward
r9150:09a1ee49 default
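In brief, the patch adds an (empty for now) localrepository.destroyed() hook and calls it from rollback(), giving strip and rollback one common place for post-destruction cleanup such as cache invalidation. As a rough illustration of the intent, a third-party extension could later hang its own cache invalidation off the hook, sketched below. This is an assumption-laden sketch, not part of the change: the myext name and _myextcache attribute are made up; extensions.wrapfunction is Mercurial's standard wrapping helper.

    # myext.py - hypothetical extension sketch (not part of this changeset).
    # Wraps the new destroyed() hook so a private cache is dropped whenever
    # strip or rollback destroys changesets.
    from mercurial import extensions, localrepo

    def _destroyed(orig, repo):
        orig(repo)               # run the default (currently empty) hook
        repo._myextcache = None  # hypothetical cache; rebuilt lazily on next use

    def uisetup(ui):
        extensions.wrapfunction(localrepo.localrepository, 'destroyed',
                                _destroyed)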
@@ -1,2124 +1,2135 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92
92
93 # These two define the set of tags for this repository. _tags
93 # These two define the set of tags for this repository. _tags
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # 'local'. (Global tags are defined by .hgtags across all
95 # 'local'. (Global tags are defined by .hgtags across all
96 # heads, and local tags are defined in .hg/localtags.) They
96 # heads, and local tags are defined in .hg/localtags.) They
97 # constitute the in-memory cache of tags.
97 # constitute the in-memory cache of tags.
98 self._tags = None
98 self._tags = None
99 self._tagtypes = None
99 self._tagtypes = None
100
100
101 self.branchcache = None
101 self.branchcache = None
102 self._ubranchcache = None # UTF-8 version of branchcache
102 self._ubranchcache = None # UTF-8 version of branchcache
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.defversion = c.version
116 self.sopener.defversion = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __nonzero__(self):
132 def __nonzero__(self):
133 return True
133 return True
134
134
135 def __len__(self):
135 def __len__(self):
136 return len(self.changelog)
136 return len(self.changelog)
137
137
138 def __iter__(self):
138 def __iter__(self):
139 for i in xrange(len(self)):
139 for i in xrange(len(self)):
140 yield i
140 yield i
141
141
142 def url(self):
142 def url(self):
143 return 'file:' + self.root
143 return 'file:' + self.root
144
144
145 def hook(self, name, throw=False, **args):
145 def hook(self, name, throw=False, **args):
146 return hook.hook(self.ui, self, name, throw, **args)
146 return hook.hook(self.ui, self, name, throw, **args)
147
147
148 tag_disallowed = ':\r\n'
148 tag_disallowed = ':\r\n'
149
149
150 def _tag(self, names, node, message, local, user, date, extra={}):
150 def _tag(self, names, node, message, local, user, date, extra={}):
151 if isinstance(names, str):
151 if isinstance(names, str):
152 allchars = names
152 allchars = names
153 names = (names,)
153 names = (names,)
154 else:
154 else:
155 allchars = ''.join(names)
155 allchars = ''.join(names)
156 for c in self.tag_disallowed:
156 for c in self.tag_disallowed:
157 if c in allchars:
157 if c in allchars:
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
159
159
160 for name in names:
160 for name in names:
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
162 local=local)
162 local=local)
163
163
164 def writetags(fp, names, munge, prevtags):
164 def writetags(fp, names, munge, prevtags):
165 fp.seek(0, 2)
165 fp.seek(0, 2)
166 if prevtags and prevtags[-1] != '\n':
166 if prevtags and prevtags[-1] != '\n':
167 fp.write('\n')
167 fp.write('\n')
168 for name in names:
168 for name in names:
169 m = munge and munge(name) or name
169 m = munge and munge(name) or name
170 if self._tagtypes and name in self._tagtypes:
170 if self._tagtypes and name in self._tagtypes:
171 old = self._tags.get(name, nullid)
171 old = self._tags.get(name, nullid)
172 fp.write('%s %s\n' % (hex(old), m))
172 fp.write('%s %s\n' % (hex(old), m))
173 fp.write('%s %s\n' % (hex(node), m))
173 fp.write('%s %s\n' % (hex(node), m))
174 fp.close()
174 fp.close()
175
175
176 prevtags = ''
176 prevtags = ''
177 if local:
177 if local:
178 try:
178 try:
179 fp = self.opener('localtags', 'r+')
179 fp = self.opener('localtags', 'r+')
180 except IOError:
180 except IOError:
181 fp = self.opener('localtags', 'a')
181 fp = self.opener('localtags', 'a')
182 else:
182 else:
183 prevtags = fp.read()
183 prevtags = fp.read()
184
184
185 # local tags are stored in the current charset
185 # local tags are stored in the current charset
186 writetags(fp, names, None, prevtags)
186 writetags(fp, names, None, prevtags)
187 for name in names:
187 for name in names:
188 self.hook('tag', node=hex(node), tag=name, local=local)
188 self.hook('tag', node=hex(node), tag=name, local=local)
189 return
189 return
190
190
191 try:
191 try:
192 fp = self.wfile('.hgtags', 'rb+')
192 fp = self.wfile('.hgtags', 'rb+')
193 except IOError:
193 except IOError:
194 fp = self.wfile('.hgtags', 'ab')
194 fp = self.wfile('.hgtags', 'ab')
195 else:
195 else:
196 prevtags = fp.read()
196 prevtags = fp.read()
197
197
198 # committed tags are stored in UTF-8
198 # committed tags are stored in UTF-8
199 writetags(fp, names, encoding.fromlocal, prevtags)
199 writetags(fp, names, encoding.fromlocal, prevtags)
200
200
201 if '.hgtags' not in self.dirstate:
201 if '.hgtags' not in self.dirstate:
202 self.add(['.hgtags'])
202 self.add(['.hgtags'])
203
203
204 m = match_.exact(self.root, '', ['.hgtags'])
204 m = match_.exact(self.root, '', ['.hgtags'])
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
206
206
207 for name in names:
207 for name in names:
208 self.hook('tag', node=hex(node), tag=name, local=local)
208 self.hook('tag', node=hex(node), tag=name, local=local)
209
209
210 return tagnode
210 return tagnode
211
211
212 def tag(self, names, node, message, local, user, date):
212 def tag(self, names, node, message, local, user, date):
213 '''tag a revision with one or more symbolic names.
213 '''tag a revision with one or more symbolic names.
214
214
215 names is a list of strings or, when adding a single tag, names may be a
215 names is a list of strings or, when adding a single tag, names may be a
216 string.
216 string.
217
217
218 if local is True, the tags are stored in a per-repository file.
218 if local is True, the tags are stored in a per-repository file.
219 otherwise, they are stored in the .hgtags file, and a new
219 otherwise, they are stored in the .hgtags file, and a new
220 changeset is committed with the change.
220 changeset is committed with the change.
221
221
222 keyword arguments:
222 keyword arguments:
223
223
224 local: whether to store tags in non-version-controlled file
224 local: whether to store tags in non-version-controlled file
225 (default False)
225 (default False)
226
226
227 message: commit message to use if committing
227 message: commit message to use if committing
228
228
229 user: name of user to use if committing
229 user: name of user to use if committing
230
230
231 date: date tuple to use if committing'''
231 date: date tuple to use if committing'''
232
232
233 for x in self.status()[:5]:
233 for x in self.status()[:5]:
234 if '.hgtags' in x:
234 if '.hgtags' in x:
235 raise util.Abort(_('working copy of .hgtags is changed '
235 raise util.Abort(_('working copy of .hgtags is changed '
236 '(please commit .hgtags manually)'))
236 '(please commit .hgtags manually)'))
237
237
238 self.tags() # instantiate the cache
238 self.tags() # instantiate the cache
239 self._tag(names, node, message, local, user, date)
239 self._tag(names, node, message, local, user, date)
240
240
241 def tags(self):
241 def tags(self):
242 '''return a mapping of tag to node'''
242 '''return a mapping of tag to node'''
243 if self._tags is None:
243 if self._tags is None:
244 (self._tags, self._tagtypes) = self._findtags()
244 (self._tags, self._tagtypes) = self._findtags()
245
245
246 return self._tags
246 return self._tags
247
247
248 def _findtags(self):
248 def _findtags(self):
249 '''Do the hard work of finding tags. Return a pair of dicts
249 '''Do the hard work of finding tags. Return a pair of dicts
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
251 maps tag name to a string like \'global\' or \'local\'.
251 maps tag name to a string like \'global\' or \'local\'.
252 Subclasses or extensions are free to add their own tags, but
252 Subclasses or extensions are free to add their own tags, but
253 should be aware that the returned dicts will be retained for the
253 should be aware that the returned dicts will be retained for the
254 duration of the localrepo object.'''
254 duration of the localrepo object.'''
255
255
256 # XXX what tagtype should subclasses/extensions use? Currently
256 # XXX what tagtype should subclasses/extensions use? Currently
257 # mq and bookmarks add tags, but do not set the tagtype at all.
257 # mq and bookmarks add tags, but do not set the tagtype at all.
258 # Should each extension invent its own tag type? Should there
258 # Should each extension invent its own tag type? Should there
259 # be one tagtype for all such "virtual" tags? Or is the status
259 # be one tagtype for all such "virtual" tags? Or is the status
260 # quo fine?
260 # quo fine?
261
261
262 alltags = {} # map tag name to (node, hist)
262 alltags = {} # map tag name to (node, hist)
263 tagtypes = {}
263 tagtypes = {}
264
264
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
267
267
268 tags = {}
268 tags = {}
269 for (name, (node, hist)) in alltags.iteritems():
269 for (name, (node, hist)) in alltags.iteritems():
270 if node != nullid:
270 if node != nullid:
271 tags[name] = node
271 tags[name] = node
272 tags['tip'] = self.changelog.tip()
272 tags['tip'] = self.changelog.tip()
273 return (tags, tagtypes)
273 return (tags, tagtypes)
274
274
275 def tagtype(self, tagname):
275 def tagtype(self, tagname):
276 '''
276 '''
277 return the type of the given tag. result can be:
277 return the type of the given tag. result can be:
278
278
279 'local' : a local tag
279 'local' : a local tag
280 'global' : a global tag
280 'global' : a global tag
281 None : tag does not exist
281 None : tag does not exist
282 '''
282 '''
283
283
284 self.tags()
284 self.tags()
285
285
286 return self._tagtypes.get(tagname)
286 return self._tagtypes.get(tagname)
287
287
288 def tagslist(self):
288 def tagslist(self):
289 '''return a list of tags ordered by revision'''
289 '''return a list of tags ordered by revision'''
290 l = []
290 l = []
291 for t, n in self.tags().iteritems():
291 for t, n in self.tags().iteritems():
292 try:
292 try:
293 r = self.changelog.rev(n)
293 r = self.changelog.rev(n)
294 except:
294 except:
295 r = -2 # sort to the beginning of the list if unknown
295 r = -2 # sort to the beginning of the list if unknown
296 l.append((r, t, n))
296 l.append((r, t, n))
297 return [(t, n) for r, t, n in sorted(l)]
297 return [(t, n) for r, t, n in sorted(l)]
298
298
299 def nodetags(self, node):
299 def nodetags(self, node):
300 '''return the tags associated with a node'''
300 '''return the tags associated with a node'''
301 if not self.nodetagscache:
301 if not self.nodetagscache:
302 self.nodetagscache = {}
302 self.nodetagscache = {}
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 self.nodetagscache.setdefault(n, []).append(t)
304 self.nodetagscache.setdefault(n, []).append(t)
305 return self.nodetagscache.get(node, [])
305 return self.nodetagscache.get(node, [])
306
306
307 def _branchtags(self, partial, lrev):
307 def _branchtags(self, partial, lrev):
308 # TODO: rename this function?
308 # TODO: rename this function?
309 tiprev = len(self) - 1
309 tiprev = len(self) - 1
310 if lrev != tiprev:
310 if lrev != tiprev:
311 self._updatebranchcache(partial, lrev+1, tiprev+1)
311 self._updatebranchcache(partial, lrev+1, tiprev+1)
312 self._writebranchcache(partial, self.changelog.tip(), tiprev)
312 self._writebranchcache(partial, self.changelog.tip(), tiprev)
313
313
314 return partial
314 return partial
315
315
316 def branchmap(self):
316 def branchmap(self):
317 tip = self.changelog.tip()
317 tip = self.changelog.tip()
318 if self.branchcache is not None and self._branchcachetip == tip:
318 if self.branchcache is not None and self._branchcachetip == tip:
319 return self.branchcache
319 return self.branchcache
320
320
321 oldtip = self._branchcachetip
321 oldtip = self._branchcachetip
322 self._branchcachetip = tip
322 self._branchcachetip = tip
323 if self.branchcache is None:
323 if self.branchcache is None:
324 self.branchcache = {} # avoid recursion in changectx
324 self.branchcache = {} # avoid recursion in changectx
325 else:
325 else:
326 self.branchcache.clear() # keep using the same dict
326 self.branchcache.clear() # keep using the same dict
327 if oldtip is None or oldtip not in self.changelog.nodemap:
327 if oldtip is None or oldtip not in self.changelog.nodemap:
328 partial, last, lrev = self._readbranchcache()
328 partial, last, lrev = self._readbranchcache()
329 else:
329 else:
330 lrev = self.changelog.rev(oldtip)
330 lrev = self.changelog.rev(oldtip)
331 partial = self._ubranchcache
331 partial = self._ubranchcache
332
332
333 self._branchtags(partial, lrev)
333 self._branchtags(partial, lrev)
334 # this private cache holds all heads (not just tips)
334 # this private cache holds all heads (not just tips)
335 self._ubranchcache = partial
335 self._ubranchcache = partial
336
336
337 # the branch cache is stored on disk as UTF-8, but in the local
337 # the branch cache is stored on disk as UTF-8, but in the local
338 # charset internally
338 # charset internally
339 for k, v in partial.iteritems():
339 for k, v in partial.iteritems():
340 self.branchcache[encoding.tolocal(k)] = v
340 self.branchcache[encoding.tolocal(k)] = v
341 return self.branchcache
341 return self.branchcache
342
342
343
343
344 def branchtags(self):
344 def branchtags(self):
345 '''return a dict where branch names map to the tipmost head of
345 '''return a dict where branch names map to the tipmost head of
346 the branch, open heads come before closed'''
346 the branch, open heads come before closed'''
347 bt = {}
347 bt = {}
348 for bn, heads in self.branchmap().iteritems():
348 for bn, heads in self.branchmap().iteritems():
349 head = None
349 head = None
350 for i in range(len(heads)-1, -1, -1):
350 for i in range(len(heads)-1, -1, -1):
351 h = heads[i]
351 h = heads[i]
352 if 'close' not in self.changelog.read(h)[5]:
352 if 'close' not in self.changelog.read(h)[5]:
353 head = h
353 head = h
354 break
354 break
355 # no open heads were found
355 # no open heads were found
356 if head is None:
356 if head is None:
357 head = heads[-1]
357 head = heads[-1]
358 bt[bn] = head
358 bt[bn] = head
359 return bt
359 return bt
360
360
361
361
362 def _readbranchcache(self):
362 def _readbranchcache(self):
363 partial = {}
363 partial = {}
364 try:
364 try:
365 f = self.opener("branchheads.cache")
365 f = self.opener("branchheads.cache")
366 lines = f.read().split('\n')
366 lines = f.read().split('\n')
367 f.close()
367 f.close()
368 except (IOError, OSError):
368 except (IOError, OSError):
369 return {}, nullid, nullrev
369 return {}, nullid, nullrev
370
370
371 try:
371 try:
372 last, lrev = lines.pop(0).split(" ", 1)
372 last, lrev = lines.pop(0).split(" ", 1)
373 last, lrev = bin(last), int(lrev)
373 last, lrev = bin(last), int(lrev)
374 if lrev >= len(self) or self[lrev].node() != last:
374 if lrev >= len(self) or self[lrev].node() != last:
375 # invalidate the cache
375 # invalidate the cache
376 raise ValueError('invalidating branch cache (tip differs)')
376 raise ValueError('invalidating branch cache (tip differs)')
377 for l in lines:
377 for l in lines:
378 if not l: continue
378 if not l: continue
379 node, label = l.split(" ", 1)
379 node, label = l.split(" ", 1)
380 partial.setdefault(label.strip(), []).append(bin(node))
380 partial.setdefault(label.strip(), []).append(bin(node))
381 except KeyboardInterrupt:
381 except KeyboardInterrupt:
382 raise
382 raise
383 except Exception, inst:
383 except Exception, inst:
384 if self.ui.debugflag:
384 if self.ui.debugflag:
385 self.ui.warn(str(inst), '\n')
385 self.ui.warn(str(inst), '\n')
386 partial, last, lrev = {}, nullid, nullrev
386 partial, last, lrev = {}, nullid, nullrev
387 return partial, last, lrev
387 return partial, last, lrev
388
388
389 def _writebranchcache(self, branches, tip, tiprev):
389 def _writebranchcache(self, branches, tip, tiprev):
390 try:
390 try:
391 f = self.opener("branchheads.cache", "w", atomictemp=True)
391 f = self.opener("branchheads.cache", "w", atomictemp=True)
392 f.write("%s %s\n" % (hex(tip), tiprev))
392 f.write("%s %s\n" % (hex(tip), tiprev))
393 for label, nodes in branches.iteritems():
393 for label, nodes in branches.iteritems():
394 for node in nodes:
394 for node in nodes:
395 f.write("%s %s\n" % (hex(node), label))
395 f.write("%s %s\n" % (hex(node), label))
396 f.rename()
396 f.rename()
397 except (IOError, OSError):
397 except (IOError, OSError):
398 pass
398 pass
399
399
400 def _updatebranchcache(self, partial, start, end):
400 def _updatebranchcache(self, partial, start, end):
401 # collect new branch entries
401 # collect new branch entries
402 newbranches = {}
402 newbranches = {}
403 for r in xrange(start, end):
403 for r in xrange(start, end):
404 c = self[r]
404 c = self[r]
405 newbranches.setdefault(c.branch(), []).append(c.node())
405 newbranches.setdefault(c.branch(), []).append(c.node())
406 # if older branchheads are reachable from new ones, they aren't
406 # if older branchheads are reachable from new ones, they aren't
407 # really branchheads. Note checking parents is insufficient:
407 # really branchheads. Note checking parents is insufficient:
408 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
408 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
409 for branch, newnodes in newbranches.iteritems():
409 for branch, newnodes in newbranches.iteritems():
410 bheads = partial.setdefault(branch, [])
410 bheads = partial.setdefault(branch, [])
411 bheads.extend(newnodes)
411 bheads.extend(newnodes)
412 if len(bheads) < 2:
412 if len(bheads) < 2:
413 continue
413 continue
414 newbheads = []
414 newbheads = []
415 # starting from tip means fewer passes over reachable
415 # starting from tip means fewer passes over reachable
416 while newnodes:
416 while newnodes:
417 latest = newnodes.pop()
417 latest = newnodes.pop()
418 if latest not in bheads:
418 if latest not in bheads:
419 continue
419 continue
420 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
420 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
421 reachable = self.changelog.reachable(latest, minbhrev)
421 reachable = self.changelog.reachable(latest, minbhrev)
422 bheads = [b for b in bheads if b not in reachable]
422 bheads = [b for b in bheads if b not in reachable]
423 newbheads.insert(0, latest)
423 newbheads.insert(0, latest)
424 bheads.extend(newbheads)
424 bheads.extend(newbheads)
425 partial[branch] = bheads
425 partial[branch] = bheads
426
426
427 def lookup(self, key):
427 def lookup(self, key):
428 if isinstance(key, int):
428 if isinstance(key, int):
429 return self.changelog.node(key)
429 return self.changelog.node(key)
430 elif key == '.':
430 elif key == '.':
431 return self.dirstate.parents()[0]
431 return self.dirstate.parents()[0]
432 elif key == 'null':
432 elif key == 'null':
433 return nullid
433 return nullid
434 elif key == 'tip':
434 elif key == 'tip':
435 return self.changelog.tip()
435 return self.changelog.tip()
436 n = self.changelog._match(key)
436 n = self.changelog._match(key)
437 if n:
437 if n:
438 return n
438 return n
439 if key in self.tags():
439 if key in self.tags():
440 return self.tags()[key]
440 return self.tags()[key]
441 if key in self.branchtags():
441 if key in self.branchtags():
442 return self.branchtags()[key]
442 return self.branchtags()[key]
443 n = self.changelog._partialmatch(key)
443 n = self.changelog._partialmatch(key)
444 if n:
444 if n:
445 return n
445 return n
446
446
447 # can't find key, check if it might have come from damaged dirstate
447 # can't find key, check if it might have come from damaged dirstate
448 if key in self.dirstate.parents():
448 if key in self.dirstate.parents():
449 raise error.Abort(_("working directory has unknown parent '%s'!")
449 raise error.Abort(_("working directory has unknown parent '%s'!")
450 % short(key))
450 % short(key))
451 try:
451 try:
452 if len(key) == 20:
452 if len(key) == 20:
453 key = hex(key)
453 key = hex(key)
454 except:
454 except:
455 pass
455 pass
456 raise error.RepoError(_("unknown revision '%s'") % key)
456 raise error.RepoError(_("unknown revision '%s'") % key)
457
457
458 def local(self):
458 def local(self):
459 return True
459 return True
460
460
461 def join(self, f):
461 def join(self, f):
462 return os.path.join(self.path, f)
462 return os.path.join(self.path, f)
463
463
464 def wjoin(self, f):
464 def wjoin(self, f):
465 return os.path.join(self.root, f)
465 return os.path.join(self.root, f)
466
466
467 def rjoin(self, f):
467 def rjoin(self, f):
468 return os.path.join(self.root, util.pconvert(f))
468 return os.path.join(self.root, util.pconvert(f))
469
469
470 def file(self, f):
470 def file(self, f):
471 if f[0] == '/':
471 if f[0] == '/':
472 f = f[1:]
472 f = f[1:]
473 return filelog.filelog(self.sopener, f)
473 return filelog.filelog(self.sopener, f)
474
474
475 def changectx(self, changeid):
475 def changectx(self, changeid):
476 return self[changeid]
476 return self[changeid]
477
477
478 def parents(self, changeid=None):
478 def parents(self, changeid=None):
479 '''get list of changectxs for parents of changeid'''
479 '''get list of changectxs for parents of changeid'''
480 return self[changeid].parents()
480 return self[changeid].parents()
481
481
482 def filectx(self, path, changeid=None, fileid=None):
482 def filectx(self, path, changeid=None, fileid=None):
483 """changeid can be a changeset revision, node, or tag.
483 """changeid can be a changeset revision, node, or tag.
484 fileid can be a file revision or node."""
484 fileid can be a file revision or node."""
485 return context.filectx(self, path, changeid, fileid)
485 return context.filectx(self, path, changeid, fileid)
486
486
487 def getcwd(self):
487 def getcwd(self):
488 return self.dirstate.getcwd()
488 return self.dirstate.getcwd()
489
489
490 def pathto(self, f, cwd=None):
490 def pathto(self, f, cwd=None):
491 return self.dirstate.pathto(f, cwd)
491 return self.dirstate.pathto(f, cwd)
492
492
493 def wfile(self, f, mode='r'):
493 def wfile(self, f, mode='r'):
494 return self.wopener(f, mode)
494 return self.wopener(f, mode)
495
495
496 def _link(self, f):
496 def _link(self, f):
497 return os.path.islink(self.wjoin(f))
497 return os.path.islink(self.wjoin(f))
498
498
499 def _filter(self, filter, filename, data):
499 def _filter(self, filter, filename, data):
500 if filter not in self.filterpats:
500 if filter not in self.filterpats:
501 l = []
501 l = []
502 for pat, cmd in self.ui.configitems(filter):
502 for pat, cmd in self.ui.configitems(filter):
503 if cmd == '!':
503 if cmd == '!':
504 continue
504 continue
505 mf = match_.match(self.root, '', [pat])
505 mf = match_.match(self.root, '', [pat])
506 fn = None
506 fn = None
507 params = cmd
507 params = cmd
508 for name, filterfn in self._datafilters.iteritems():
508 for name, filterfn in self._datafilters.iteritems():
509 if cmd.startswith(name):
509 if cmd.startswith(name):
510 fn = filterfn
510 fn = filterfn
511 params = cmd[len(name):].lstrip()
511 params = cmd[len(name):].lstrip()
512 break
512 break
513 if not fn:
513 if not fn:
514 fn = lambda s, c, **kwargs: util.filter(s, c)
514 fn = lambda s, c, **kwargs: util.filter(s, c)
515 # Wrap old filters not supporting keyword arguments
515 # Wrap old filters not supporting keyword arguments
516 if not inspect.getargspec(fn)[2]:
516 if not inspect.getargspec(fn)[2]:
517 oldfn = fn
517 oldfn = fn
518 fn = lambda s, c, **kwargs: oldfn(s, c)
518 fn = lambda s, c, **kwargs: oldfn(s, c)
519 l.append((mf, fn, params))
519 l.append((mf, fn, params))
520 self.filterpats[filter] = l
520 self.filterpats[filter] = l
521
521
522 for mf, fn, cmd in self.filterpats[filter]:
522 for mf, fn, cmd in self.filterpats[filter]:
523 if mf(filename):
523 if mf(filename):
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
525 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
525 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
526 break
526 break
527
527
528 return data
528 return data
529
529
530 def adddatafilter(self, name, filter):
530 def adddatafilter(self, name, filter):
531 self._datafilters[name] = filter
531 self._datafilters[name] = filter
532
532
533 def wread(self, filename):
533 def wread(self, filename):
534 if self._link(filename):
534 if self._link(filename):
535 data = os.readlink(self.wjoin(filename))
535 data = os.readlink(self.wjoin(filename))
536 else:
536 else:
537 data = self.wopener(filename, 'r').read()
537 data = self.wopener(filename, 'r').read()
538 return self._filter("encode", filename, data)
538 return self._filter("encode", filename, data)
539
539
540 def wwrite(self, filename, data, flags):
540 def wwrite(self, filename, data, flags):
541 data = self._filter("decode", filename, data)
541 data = self._filter("decode", filename, data)
542 try:
542 try:
543 os.unlink(self.wjoin(filename))
543 os.unlink(self.wjoin(filename))
544 except OSError:
544 except OSError:
545 pass
545 pass
546 if 'l' in flags:
546 if 'l' in flags:
547 self.wopener.symlink(data, filename)
547 self.wopener.symlink(data, filename)
548 else:
548 else:
549 self.wopener(filename, 'w').write(data)
549 self.wopener(filename, 'w').write(data)
550 if 'x' in flags:
550 if 'x' in flags:
551 util.set_flags(self.wjoin(filename), False, True)
551 util.set_flags(self.wjoin(filename), False, True)
552
552
553 def wwritedata(self, filename, data):
553 def wwritedata(self, filename, data):
554 return self._filter("decode", filename, data)
554 return self._filter("decode", filename, data)
555
555
556 def transaction(self):
556 def transaction(self):
557 tr = self._transref and self._transref() or None
557 tr = self._transref and self._transref() or None
558 if tr and tr.running():
558 if tr and tr.running():
559 return tr.nest()
559 return tr.nest()
560
560
561 # abort here if the journal already exists
561 # abort here if the journal already exists
562 if os.path.exists(self.sjoin("journal")):
562 if os.path.exists(self.sjoin("journal")):
563 raise error.RepoError(_("journal already exists - run hg recover"))
563 raise error.RepoError(_("journal already exists - run hg recover"))
564
564
565 # save dirstate for rollback
565 # save dirstate for rollback
566 try:
566 try:
567 ds = self.opener("dirstate").read()
567 ds = self.opener("dirstate").read()
568 except IOError:
568 except IOError:
569 ds = ""
569 ds = ""
570 self.opener("journal.dirstate", "w").write(ds)
570 self.opener("journal.dirstate", "w").write(ds)
571 self.opener("journal.branch", "w").write(self.dirstate.branch())
571 self.opener("journal.branch", "w").write(self.dirstate.branch())
572
572
573 renames = [(self.sjoin("journal"), self.sjoin("undo")),
573 renames = [(self.sjoin("journal"), self.sjoin("undo")),
574 (self.join("journal.dirstate"), self.join("undo.dirstate")),
574 (self.join("journal.dirstate"), self.join("undo.dirstate")),
575 (self.join("journal.branch"), self.join("undo.branch"))]
575 (self.join("journal.branch"), self.join("undo.branch"))]
576 tr = transaction.transaction(self.ui.warn, self.sopener,
576 tr = transaction.transaction(self.ui.warn, self.sopener,
577 self.sjoin("journal"),
577 self.sjoin("journal"),
578 aftertrans(renames),
578 aftertrans(renames),
579 self.store.createmode)
579 self.store.createmode)
580 self._transref = weakref.ref(tr)
580 self._transref = weakref.ref(tr)
581 return tr
581 return tr
582
582
583 def recover(self):
583 def recover(self):
584 lock = self.lock()
584 lock = self.lock()
585 try:
585 try:
586 if os.path.exists(self.sjoin("journal")):
586 if os.path.exists(self.sjoin("journal")):
587 self.ui.status(_("rolling back interrupted transaction\n"))
587 self.ui.status(_("rolling back interrupted transaction\n"))
588 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
588 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
589 self.invalidate()
589 self.invalidate()
590 return True
590 return True
591 else:
591 else:
592 self.ui.warn(_("no interrupted transaction available\n"))
592 self.ui.warn(_("no interrupted transaction available\n"))
593 return False
593 return False
594 finally:
594 finally:
595 lock.release()
595 lock.release()
596
596
597 def rollback(self):
597 def rollback(self):
598 wlock = lock = None
598 wlock = lock = None
599 try:
599 try:
600 wlock = self.wlock()
600 wlock = self.wlock()
601 lock = self.lock()
601 lock = self.lock()
602 if os.path.exists(self.sjoin("undo")):
602 if os.path.exists(self.sjoin("undo")):
603 self.ui.status(_("rolling back last transaction\n"))
603 self.ui.status(_("rolling back last transaction\n"))
604 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
604 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
605 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
605 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
606 try:
606 try:
607 branch = self.opener("undo.branch").read()
607 branch = self.opener("undo.branch").read()
608 self.dirstate.setbranch(branch)
608 self.dirstate.setbranch(branch)
609 except IOError:
609 except IOError:
610 self.ui.warn(_("Named branch could not be reset, "
610 self.ui.warn(_("Named branch could not be reset, "
611 "current branch still is: %s\n")
611 "current branch still is: %s\n")
612 % encoding.tolocal(self.dirstate.branch()))
612 % encoding.tolocal(self.dirstate.branch()))
613 self.invalidate()
613 self.invalidate()
614 self.dirstate.invalidate()
614 self.dirstate.invalidate()
615 self.destroyed()
615 else:
616 else:
616 self.ui.warn(_("no rollback information available\n"))
617 self.ui.warn(_("no rollback information available\n"))
617 finally:
618 finally:
618 release(lock, wlock)
619 release(lock, wlock)
619
620
620 def invalidate(self):
621 def invalidate(self):
621 for a in "changelog manifest".split():
622 for a in "changelog manifest".split():
622 if a in self.__dict__:
623 if a in self.__dict__:
623 delattr(self, a)
624 delattr(self, a)
624 self._tags = None
625 self._tags = None
625 self._tagtypes = None
626 self._tagtypes = None
626 self.nodetagscache = None
627 self.nodetagscache = None
627 self.branchcache = None
628 self.branchcache = None
628 self._ubranchcache = None
629 self._ubranchcache = None
629 self._branchcachetip = None
630 self._branchcachetip = None
630
631
631 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
632 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
632 try:
633 try:
633 l = lock.lock(lockname, 0, releasefn, desc=desc)
634 l = lock.lock(lockname, 0, releasefn, desc=desc)
634 except error.LockHeld, inst:
635 except error.LockHeld, inst:
635 if not wait:
636 if not wait:
636 raise
637 raise
637 self.ui.warn(_("waiting for lock on %s held by %r\n") %
638 self.ui.warn(_("waiting for lock on %s held by %r\n") %
638 (desc, inst.locker))
639 (desc, inst.locker))
639 # default to 600 seconds timeout
640 # default to 600 seconds timeout
640 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
641 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
641 releasefn, desc=desc)
642 releasefn, desc=desc)
642 if acquirefn:
643 if acquirefn:
643 acquirefn()
644 acquirefn()
644 return l
645 return l
645
646
646 def lock(self, wait=True):
647 def lock(self, wait=True):
647 l = self._lockref and self._lockref()
648 l = self._lockref and self._lockref()
648 if l is not None and l.held:
649 if l is not None and l.held:
649 l.lock()
650 l.lock()
650 return l
651 return l
651
652
652 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
653 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
653 _('repository %s') % self.origroot)
654 _('repository %s') % self.origroot)
654 self._lockref = weakref.ref(l)
655 self._lockref = weakref.ref(l)
655 return l
656 return l
656
657
657 def wlock(self, wait=True):
658 def wlock(self, wait=True):
658 l = self._wlockref and self._wlockref()
659 l = self._wlockref and self._wlockref()
659 if l is not None and l.held:
660 if l is not None and l.held:
660 l.lock()
661 l.lock()
661 return l
662 return l
662
663
663 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
664 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
664 self.dirstate.invalidate, _('working directory of %s') %
665 self.dirstate.invalidate, _('working directory of %s') %
665 self.origroot)
666 self.origroot)
666 self._wlockref = weakref.ref(l)
667 self._wlockref = weakref.ref(l)
667 return l
668 return l
668
669
669 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
670 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
670 """
671 """
671 commit an individual file as part of a larger transaction
672 commit an individual file as part of a larger transaction
672 """
673 """
673
674
674 fname = fctx.path()
675 fname = fctx.path()
675 text = fctx.data()
676 text = fctx.data()
676 flog = self.file(fname)
677 flog = self.file(fname)
677 fparent1 = manifest1.get(fname, nullid)
678 fparent1 = manifest1.get(fname, nullid)
678 fparent2 = fparent2o = manifest2.get(fname, nullid)
679 fparent2 = fparent2o = manifest2.get(fname, nullid)
679
680
680 meta = {}
681 meta = {}
681 copy = fctx.renamed()
682 copy = fctx.renamed()
682 if copy and copy[0] != fname:
683 if copy and copy[0] != fname:
683 # Mark the new revision of this file as a copy of another
684 # Mark the new revision of this file as a copy of another
684 # file. This copy data will effectively act as a parent
685 # file. This copy data will effectively act as a parent
685 # of this new revision. If this is a merge, the first
686 # of this new revision. If this is a merge, the first
686 # parent will be the nullid (meaning "look up the copy data")
687 # parent will be the nullid (meaning "look up the copy data")
687 # and the second one will be the other parent. For example:
688 # and the second one will be the other parent. For example:
688 #
689 #
689 # 0 --- 1 --- 3 rev1 changes file foo
690 # 0 --- 1 --- 3 rev1 changes file foo
690 # \ / rev2 renames foo to bar and changes it
691 # \ / rev2 renames foo to bar and changes it
691 # \- 2 -/ rev3 should have bar with all changes and
692 # \- 2 -/ rev3 should have bar with all changes and
692 # should record that bar descends from
693 # should record that bar descends from
693 # bar in rev2 and foo in rev1
694 # bar in rev2 and foo in rev1
694 #
695 #
695 # this allows this merge to succeed:
696 # this allows this merge to succeed:
696 #
697 #
697 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
698 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
698 # \ / merging rev3 and rev4 should use bar@rev2
699 # \ / merging rev3 and rev4 should use bar@rev2
699 # \- 2 --- 4 as the merge base
700 # \- 2 --- 4 as the merge base
700 #
701 #
701
702
702 cfname = copy[0]
703 cfname = copy[0]
703 crev = manifest1.get(cfname)
704 crev = manifest1.get(cfname)
704 newfparent = fparent2
705 newfparent = fparent2
705
706
706 if manifest2: # branch merge
707 if manifest2: # branch merge
707 if fparent2 == nullid or crev is None: # copied on remote side
708 if fparent2 == nullid or crev is None: # copied on remote side
708 if cfname in manifest2:
709 if cfname in manifest2:
709 crev = manifest2[cfname]
710 crev = manifest2[cfname]
710 newfparent = fparent1
711 newfparent = fparent1
711
712
712 # find source in nearest ancestor if we've lost track
713 # find source in nearest ancestor if we've lost track
713 if not crev:
714 if not crev:
714 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
715 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
715 (fname, cfname))
716 (fname, cfname))
716 for ancestor in self['.'].ancestors():
717 for ancestor in self['.'].ancestors():
717 if cfname in ancestor:
718 if cfname in ancestor:
718 crev = ancestor[cfname].filenode()
719 crev = ancestor[cfname].filenode()
719 break
720 break
720
721
721 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
722 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
722 meta["copy"] = cfname
723 meta["copy"] = cfname
723 meta["copyrev"] = hex(crev)
724 meta["copyrev"] = hex(crev)
724 fparent1, fparent2 = nullid, newfparent
725 fparent1, fparent2 = nullid, newfparent
725 elif fparent2 != nullid:
726 elif fparent2 != nullid:
726 # is one parent an ancestor of the other?
727 # is one parent an ancestor of the other?
727 fparentancestor = flog.ancestor(fparent1, fparent2)
728 fparentancestor = flog.ancestor(fparent1, fparent2)
728 if fparentancestor == fparent1:
729 if fparentancestor == fparent1:
729 fparent1, fparent2 = fparent2, nullid
730 fparent1, fparent2 = fparent2, nullid
730 elif fparentancestor == fparent2:
731 elif fparentancestor == fparent2:
731 fparent2 = nullid
732 fparent2 = nullid
732
733
733 # is the file changed?
734 # is the file changed?
734 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
735 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
735 changelist.append(fname)
736 changelist.append(fname)
736 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
737 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
737
738
738 # are just the flags changed during merge?
739 # are just the flags changed during merge?
739 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
740 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
740 changelist.append(fname)
741 changelist.append(fname)
741
742
742 return fparent1
743 return fparent1
743
744
744 def commit(self, text="", user=None, date=None, match=None, force=False,
745 def commit(self, text="", user=None, date=None, match=None, force=False,
745 editor=False, extra={}):
746 editor=False, extra={}):
746 """Add a new revision to current repository.
747 """Add a new revision to current repository.
747
748
748 Revision information is gathered from the working directory,
749 Revision information is gathered from the working directory,
749 match can be used to filter the committed files. If editor is
750 match can be used to filter the committed files. If editor is
750 supplied, it is called to get a commit message.
751 supplied, it is called to get a commit message.
751 """
752 """
752
753
753 def fail(f, msg):
754 def fail(f, msg):
754 raise util.Abort('%s: %s' % (f, msg))
755 raise util.Abort('%s: %s' % (f, msg))
755
756
756 if not match:
757 if not match:
757 match = match_.always(self.root, '')
758 match = match_.always(self.root, '')
758
759
759 if not force:
760 if not force:
760 vdirs = []
761 vdirs = []
761 match.dir = vdirs.append
762 match.dir = vdirs.append
762 match.bad = fail
763 match.bad = fail
763
764
764 wlock = self.wlock()
765 wlock = self.wlock()
765 try:
766 try:
766 p1, p2 = self.dirstate.parents()
767 p1, p2 = self.dirstate.parents()
767 wctx = self[None]
768 wctx = self[None]
768
769
769 if (not force and p2 != nullid and match and
770 if (not force and p2 != nullid and match and
770 (match.files() or match.anypats())):
771 (match.files() or match.anypats())):
771 raise util.Abort(_('cannot partially commit a merge '
772 raise util.Abort(_('cannot partially commit a merge '
772 '(do not specify files or patterns)'))
773 '(do not specify files or patterns)'))
773
774
774 changes = self.status(match=match, clean=force)
775 changes = self.status(match=match, clean=force)
775 if force:
776 if force:
776 changes[0].extend(changes[6]) # mq may commit unchanged files
777 changes[0].extend(changes[6]) # mq may commit unchanged files
777
778
778 # check subrepos
779 # check subrepos
779 subs = []
780 subs = []
780 for s in wctx.substate:
781 for s in wctx.substate:
781 if match(s) and wctx.sub(s).dirty():
782 if match(s) and wctx.sub(s).dirty():
782 subs.append(s)
783 subs.append(s)
783 if subs and '.hgsubstate' not in changes[0]:
784 if subs and '.hgsubstate' not in changes[0]:
784 changes[0].insert(0, '.hgsubstate')
785 changes[0].insert(0, '.hgsubstate')
785
786
786 # make sure all explicit patterns are matched
787 # make sure all explicit patterns are matched
787 if not force and match.files():
788 if not force and match.files():
788 matched = set(changes[0] + changes[1] + changes[2])
789 matched = set(changes[0] + changes[1] + changes[2])
789
790
790 for f in match.files():
791 for f in match.files():
791 if f == '.' or f in matched or f in wctx.substate:
792 if f == '.' or f in matched or f in wctx.substate:
792 continue
793 continue
793 if f in changes[3]: # missing
794 if f in changes[3]: # missing
794 fail(f, _('file not found!'))
795 fail(f, _('file not found!'))
795 if f in vdirs: # visited directory
796 if f in vdirs: # visited directory
796 d = f + '/'
797 d = f + '/'
797 for mf in matched:
798 for mf in matched:
798 if mf.startswith(d):
799 if mf.startswith(d):
799 break
800 break
800 else:
801 else:
801 fail(f, _("no match under directory!"))
802 fail(f, _("no match under directory!"))
802 elif f not in self.dirstate:
803 elif f not in self.dirstate:
803 fail(f, _("file not tracked!"))
804 fail(f, _("file not tracked!"))
804
805
805 if (not force and not extra.get("close") and p2 == nullid
806 if (not force and not extra.get("close") and p2 == nullid
806 and not (changes[0] or changes[1] or changes[2])
807 and not (changes[0] or changes[1] or changes[2])
807 and self[None].branch() == self['.'].branch()):
808 and self[None].branch() == self['.'].branch()):
808 return None
809 return None
809
810
810 ms = merge_.mergestate(self)
811 ms = merge_.mergestate(self)
811 for f in changes[0]:
812 for f in changes[0]:
812 if f in ms and ms[f] == 'u':
813 if f in ms and ms[f] == 'u':
813 raise util.Abort(_("unresolved merge conflicts "
814 raise util.Abort(_("unresolved merge conflicts "
814 "(see hg resolve)"))
815 "(see hg resolve)"))
815
816
816 cctx = context.workingctx(self, (p1, p2), text, user, date,
817 cctx = context.workingctx(self, (p1, p2), text, user, date,
817 extra, changes)
818 extra, changes)
818 if editor:
819 if editor:
819 cctx._text = editor(self, cctx, subs)
820 cctx._text = editor(self, cctx, subs)
820
821
821 # commit subs
822 # commit subs
822 if subs:
823 if subs:
823 state = wctx.substate.copy()
824 state = wctx.substate.copy()
824 for s in subs:
825 for s in subs:
825 self.ui.status(_('committing subrepository %s\n') % s)
826 self.ui.status(_('committing subrepository %s\n') % s)
826 sr = wctx.sub(s).commit(cctx._text, user, date)
827 sr = wctx.sub(s).commit(cctx._text, user, date)
827 state[s] = (state[s][0], sr)
828 state[s] = (state[s][0], sr)
828 subrepo.writestate(self, state)
829 subrepo.writestate(self, state)
829
830
830 ret = self.commitctx(cctx, True)
831 ret = self.commitctx(cctx, True)
831
832
832 # update dirstate and mergestate
833 # update dirstate and mergestate
833 for f in changes[0] + changes[1]:
834 for f in changes[0] + changes[1]:
834 self.dirstate.normal(f)
835 self.dirstate.normal(f)
835 for f in changes[2]:
836 for f in changes[2]:
836 self.dirstate.forget(f)
837 self.dirstate.forget(f)
837 self.dirstate.setparents(ret)
838 self.dirstate.setparents(ret)
838 ms.reset()
839 ms.reset()
839
840
840 return ret
841 return ret
841
842
842 finally:
843 finally:
843 wlock.release()
844 wlock.release()
844
845
845 def commitctx(self, ctx, error=False):
846 def commitctx(self, ctx, error=False):
846 """Add a new revision to current repository.
847 """Add a new revision to current repository.
847
848
848 Revision information is passed via the context argument.
849 Revision information is passed via the context argument.
849 """
850 """
850
851
851 tr = lock = None
852 tr = lock = None
852 removed = ctx.removed()
853 removed = ctx.removed()
853 p1, p2 = ctx.p1(), ctx.p2()
854 p1, p2 = ctx.p1(), ctx.p2()
854 m1 = p1.manifest().copy()
855 m1 = p1.manifest().copy()
855 m2 = p2.manifest()
856 m2 = p2.manifest()
856 user = ctx.user()
857 user = ctx.user()
857
858
858 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
859 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
859 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
860 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
860
861
861 lock = self.lock()
862 lock = self.lock()
862 try:
863 try:
863 tr = self.transaction()
864 tr = self.transaction()
864 trp = weakref.proxy(tr)
865 trp = weakref.proxy(tr)
865
866
866 # check in files
867 # check in files
867 new = {}
868 new = {}
868 changed = []
869 changed = []
869 linkrev = len(self)
870 linkrev = len(self)
870 for f in sorted(ctx.modified() + ctx.added()):
871 for f in sorted(ctx.modified() + ctx.added()):
871 self.ui.note(f + "\n")
872 self.ui.note(f + "\n")
872 try:
873 try:
873 fctx = ctx[f]
874 fctx = ctx[f]
874 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
875 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
875 changed)
876 changed)
876 m1.set(f, fctx.flags())
877 m1.set(f, fctx.flags())
877 except (OSError, IOError):
878 except (OSError, IOError):
878 if error:
879 if error:
879 self.ui.warn(_("trouble committing %s!\n") % f)
880 self.ui.warn(_("trouble committing %s!\n") % f)
880 raise
881 raise
881 else:
882 else:
882 removed.append(f)
883 removed.append(f)
883
884
884 # update manifest
885 # update manifest
885 m1.update(new)
886 m1.update(new)
886 removed = [f for f in sorted(removed) if f in m1 or f in m2]
887 removed = [f for f in sorted(removed) if f in m1 or f in m2]
887 drop = [f for f in removed if f in m1]
888 drop = [f for f in removed if f in m1]
888 for f in drop:
889 for f in drop:
889 del m1[f]
890 del m1[f]
890 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
891 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
891 p2.manifestnode(), (new, drop))
892 p2.manifestnode(), (new, drop))
892
893
893 # update changelog
894 # update changelog
894 self.changelog.delayupdate()
895 self.changelog.delayupdate()
895 n = self.changelog.add(mn, changed + removed, ctx.description(),
896 n = self.changelog.add(mn, changed + removed, ctx.description(),
896 trp, p1.node(), p2.node(),
897 trp, p1.node(), p2.node(),
897 user, ctx.date(), ctx.extra().copy())
898 user, ctx.date(), ctx.extra().copy())
898 p = lambda: self.changelog.writepending() and self.root or ""
899 p = lambda: self.changelog.writepending() and self.root or ""
899 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
900 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
900 parent2=xp2, pending=p)
901 parent2=xp2, pending=p)
901 self.changelog.finalize(trp)
902 self.changelog.finalize(trp)
902 tr.close()
903 tr.close()
903
904
904 if self.branchcache:
905 if self.branchcache:
905 self.branchtags()
906 self.branchtags()
906
907
907 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
908 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
908 return n
909 return n
909 finally:
910 finally:
910 del tr
911 del tr
911 lock.release()
912 lock.release()
912
913
914 def destroyed(self):
915 '''Inform the repository that nodes have been destroyed.
916 Intended for use by strip and rollback, so there's a common
917 place for anything that has to be done after destroying history.'''
918 # Do nothing for now: this is a placeholder that will be used
919 # when we add tag caching.
920 # XXX it might be nice if we could take the list of destroyed
921 # nodes, but I don't see an easy way for rollback() to do that
922 pass
923
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        The result is a tuple of file lists, in the order
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

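    # branchheads() answers "what are the heads of the given branch?",
    # highest revision first.  With start, only heads reachable from start
    # are kept; with closed=False (the default), heads whose changeset
    # carries a 'close' marker in its extra dict are filtered out.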
    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

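    # between() is the sampling primitive behind the discovery protocol's
    # binary search: for each (top, bottom) pair it walks the first-parent
    # chain from top towards bottom and reports the nodes at exponentially
    # growing distances (1, 2, 4, 8, ...), letting the peer narrow down the
    # boundary between known and unknown history in O(log n) rounds.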
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

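    # The discovery below runs in three phases:
    #   1. compare heads: every remote head already present locally is
    #      common; the rest are unknown and seed the search;
    #   2. fetch unknown remote 'branches' (linear history segments) with
    #      remote.branches() until each one bottoms out at a known node;
    #   3. binary-search the incomplete segments via remote.between() to
    #      pin down the exact first unknown changeset on each.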
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

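    # prepush() computes what a push would transfer.  It returns a pair:
    # (changegroup, remote_heads) when there is something to send, or
    # (None, status) when there is not -- status 1 means "no changes
    # found", status 0 means "push blocked" -- and the push_* methods
    # below pass that status through as their own return value.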
    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

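        # checkbranch enforces the "don't create new remote heads" rule:
        # without --force, a push is refused if it would leave the remote
        # with more heads on a branch than it already has, or would create
        # a named branch the remote does not know about.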
        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function-generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function-generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode that
            # the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function-generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
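        # Stream layout produced below: one chunk group for the changelog,
        # one for the manifest, then for each changed file its name (length-
        # prefixed via changegroup.chunkheader) followed by a group of its
        # filenode chunks; a final empty chunk (changegroup.closechunk())
        # signals that no file groups remain.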
1795 def gengroup():
1806 def gengroup():
1796 # The set of changed files starts empty.
1807 # The set of changed files starts empty.
1797 changedfiles = {}
1808 changedfiles = {}
1798 # Create a changenode group generator that will call our functions
1809 # Create a changenode group generator that will call our functions
1799 # back to lookup the owning changenode and collect information.
1810 # back to lookup the owning changenode and collect information.
1811 group = cl.group(msng_cl_lst, identity,
1812 manifest_and_file_collector(changedfiles))
1813 for chnk in group:
1814 yield chnk
1815
1816 # The list of manifests has been collected by the generator
1817 # calling our functions back.
1818 prune_manifests()
1819 add_extra_nodes(1, msng_mnfst_set)
1820 msng_mnfst_lst = msng_mnfst_set.keys()
1821 # Sort the manifestnodes by revision number.
1822 msng_mnfst_lst.sort(key=mnfst.rev)
1823 # Create a generator for the manifestnodes that calls our lookup
1824 # and data collection functions back.
1825 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1826 filenode_collector(changedfiles))
1827 for chnk in group:
1828 yield chnk
1829
1830 # These are no longer needed, dereference and toss the memory for
1831 # them.
1832 msng_mnfst_lst = None
1833 msng_mnfst_set.clear()
1834
1835 if extranodes:
1836 for fname in extranodes:
1837 if isinstance(fname, int):
1838 continue
1839 msng_filenode_set.setdefault(fname, {})
1840 changedfiles[fname] = 1
1841 # Go through all our files in order sorted by name.
1842 for fname in sorted(changedfiles):
1843 filerevlog = self.file(fname)
1844 if not len(filerevlog):
1845 raise util.Abort(_("empty or missing revlog for %s") % fname)
1846 # Toss out the filenodes that the recipient isn't really
1847 # missing.
1848 if fname in msng_filenode_set:
1849 prune_filenodes(fname, filerevlog)
1850 add_extra_nodes(fname, msng_filenode_set[fname])
1851 msng_filenode_lst = msng_filenode_set[fname].keys()
1852 else:
1853 msng_filenode_lst = []
1854 # If any filenodes are left, generate the group for them,
1855 # otherwise don't bother.
1856 if len(msng_filenode_lst) > 0:
1857 yield changegroup.chunkheader(len(fname))
1858 yield fname
1859 # Sort the filenodes by their revision #
1860 msng_filenode_lst.sort(key=filerevlog.rev)
1861 # Create a group generator; only pass in a changenode lookup
1862 # function, since we don't need to collect any information
1863 # from filenodes.
1864 group = filerevlog.group(msng_filenode_lst,
1865 lookup_filenode_link_func(fname))
1866 for chnk in group:
1867 yield chnk
1868 if fname in msng_filenode_set:
1869 # Don't need this anymore, toss it to free memory.
1870 del msng_filenode_set[fname]
1871 # Signal that no more groups are left.
1872 yield changegroup.closechunk()
1873
1874 if msng_cl_lst:
1875 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1876
1877 return util.chunkbuffer(gengroup())
1878
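changegroupsubset() hands back a util.chunkbuffer rather than a fully built string, so callers can stream the group without holding it all in memory. A minimal consumer sketch (the function name, output path, and 4096-byte read size are illustrative; real callers such as changegroup.writebundle also prepend a bundle header):

    def write_raw_changegroup(repo, bases, heads, path):
        # Stream the serialized changegroup to a file in small pieces.
        cg = repo.changegroupsubset(bases, heads, 'strip')
        out = open(path, 'wb')
        try:
            data = cg.read(4096)
            while data:
                out.write(data)
                data = cg.read(4096)
        finally:
            out.close()
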
1879 def changegroup(self, basenodes, source):
1880 # to avoid a race we use changegroupsubset() (issue1320)
1881 return self.changegroupsubset(basenodes, self.heads(), source)
1882
1883 def _changegroup(self, common, source):
1884 """Generate a changegroup of all nodes that we have that a recipient
1885 doesn't.
1886
1887 This is much easier than the previous function as we can assume that
1888 the recipient has any changenode we aren't sending them.
1889
1890 common is the set of common nodes between remote and self"""
1891
1892 self.hook('preoutgoing', throw=True, source=source)
1893
1894 cl = self.changelog
1895 nodes = cl.findmissing(common)
1896 revset = set([cl.rev(n) for n in nodes])
1897 self.changegroupinfo(nodes, source)
1898
1899 def identity(x):
1900 return x
1901
1902 def gennodelst(log):
1903 for r in log:
1904 if log.linkrev(r) in revset:
1905 yield log.node(r)
1906
1907 def changed_file_collector(changedfileset):
1908 def collect_changed_files(clnode):
1909 c = cl.read(clnode)
1910 changedfileset.update(c[3])
1911 return collect_changed_files
1912
1913 def lookuprevlink_func(revlog):
1914 def lookuprevlink(n):
1915 return cl.node(revlog.linkrev(revlog.rev(n)))
1916 return lookuprevlink
1917
1918 def gengroup():
1919 # construct a list of all changed files
1920 changedfiles = set()
1921
1922 for chnk in cl.group(nodes, identity,
1923 changed_file_collector(changedfiles)):
1924 yield chnk
1925
1926 mnfst = self.manifest
1927 nodeiter = gennodelst(mnfst)
1928 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1929 yield chnk
1930
1931 for fname in sorted(changedfiles):
1932 filerevlog = self.file(fname)
1933 if not len(filerevlog):
1934 raise util.Abort(_("empty or missing revlog for %s") % fname)
1935 nodeiter = gennodelst(filerevlog)
1936 nodeiter = list(nodeiter)
1937 if nodeiter:
1938 yield changegroup.chunkheader(len(fname))
1939 yield fname
1940 lookup = lookuprevlink_func(filerevlog)
1941 for chnk in filerevlog.group(nodeiter, lookup):
1942 yield chnk
1943
1944 yield changegroup.closechunk()
1945
1946 if nodes:
1947 self.hook('outgoing', node=hex(nodes[0]), source=source)
1948
1949 return util.chunkbuffer(gengroup())
1950
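Both gengroup() generators in this file emit the same stream shape. Summarizing the yields above (a descriptive sketch, not a formal protocol spec; each "group" is a run of delta chunks terminated by an empty chunk):

    # changegroup stream layout:
    #
    #   changelog group
    #   manifest group
    #   for each changed file:
    #       chunkheader(len(fname)) + fname
    #       filelog group
    #   closechunk()    (empty chunk: no more file groups follow)
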
1951 def addchangegroup(self, source, srctype, url, emptyok=False):
1952 """add changegroup to repo.
1953
1954 return values:
1955 - nothing changed or no source: 0
1956 - more heads than before: 1+added heads (2..n)
1957 - fewer heads than before: -1-removed heads (-2..-n)
1958 - number of heads stays the same: 1
1948 """
1959 """
1949 def csmap(x):
1960 def csmap(x):
1950 self.ui.debug(_("add changeset %s\n") % short(x))
1961 self.ui.debug(_("add changeset %s\n") % short(x))
1951 return len(cl)
1962 return len(cl)
1952
1963
1953 def revmap(x):
1964 def revmap(x):
1954 return cl.rev(x)
1965 return cl.rev(x)
1955
1966
1956 if not source:
1967 if not source:
1957 return 0
1968 return 0
1958
1969
1959 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1970 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1960
1971
1961 changesets = files = revisions = 0
1972 changesets = files = revisions = 0
1962
1973
1974 # write changelog data to temp files so concurrent readers will not see
1975 # an inconsistent view
1976 cl = self.changelog
1977 cl.delayupdate()
1978 oldheads = len(cl.heads())
1979
1980 tr = self.transaction()
1981 try:
1982 trp = weakref.proxy(tr)
1983 # pull off the changeset group
1984 self.ui.status(_("adding changesets\n"))
1985 clstart = len(cl)
1986 chunkiter = changegroup.chunkiter(source)
1987 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1988 raise util.Abort(_("received changelog group is empty"))
1989 clend = len(cl)
1990 changesets = clend - clstart
1991
1992 # pull off the manifest group
1993 self.ui.status(_("adding manifests\n"))
1994 chunkiter = changegroup.chunkiter(source)
1995 # no need to check for empty manifest group here:
1996 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1997 # no new manifest will be created and the manifest group will
1998 # be empty during the pull
1999 self.manifest.addgroup(chunkiter, revmap, trp)
2000
2001 # process the files
2002 self.ui.status(_("adding file changes\n"))
2003 while 1:
2004 f = changegroup.getchunk(source)
2005 if not f:
2006 break
2007 self.ui.debug(_("adding %s revisions\n") % f)
2008 fl = self.file(f)
2009 o = len(fl)
2010 chunkiter = changegroup.chunkiter(source)
2011 if fl.addgroup(chunkiter, revmap, trp) is None:
2012 raise util.Abort(_("received file revlog group is empty"))
2013 revisions += len(fl) - o
2014 files += 1
2015
2016 newheads = len(cl.heads())
2017 heads = ""
2018 if oldheads and newheads != oldheads:
2019 heads = _(" (%+d heads)") % (newheads - oldheads)
2020
2021 self.ui.status(_("added %d changesets"
2022 " with %d changes to %d files%s\n")
2023 % (changesets, revisions, files, heads))
2024
2025 if changesets > 0:
2026 p = lambda: cl.writepending() and self.root or ""
2027 self.hook('pretxnchangegroup', throw=True,
2028 node=hex(cl.node(clstart)), source=srctype,
2029 url=url, pending=p)
2030
2031 # make changelog see real files again
2032 cl.finalize(trp)
2033
2034 tr.close()
2035 finally:
2036 del tr
2037
2038 if changesets > 0:
2039 # forcefully update the on-disk branch cache
2040 self.ui.debug(_("updating the branch cache\n"))
2041 self.branchtags()
2042 self.hook("changegroup", node=hex(cl.node(clstart)),
2043 source=srctype, url=url)
2044
2045 for i in xrange(clstart, clend):
2046 self.hook("incoming", node=hex(cl.node(i)),
2047 source=srctype, url=url)
2048
2049 # never return 0 here:
2050 if newheads < oldheads:
2051 return newheads - oldheads - 1
2052 else:
2053 return newheads - oldheads + 1
2054
2055
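The return value encodes the head-count delta documented in the docstring above. A hypothetical caller (the helper name and messages are made up for illustration) could decode it as:

    def describe_result(ui, ret):
        # decode addchangegroup()'s contract: 0 = nothing changed,
        # 1 = head count unchanged, n > 1 = (n - 1) heads added,
        # n < 0 = (-n - 1) heads removed
        if ret == 0:
            ui.status("nothing changed\n")
        elif ret > 1:
            ui.status("%d new heads\n" % (ret - 1))
        elif ret < 0:
            ui.status("%d heads removed\n" % (-ret - 1))
        else:
            ui.status("head count unchanged\n")
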
2056 def stream_in(self, remote):
2057 fp = remote.stream_out()
2058 l = fp.readline()
2059 try:
2060 resp = int(l)
2061 except ValueError:
2062 raise error.ResponseError(
2063 _('Unexpected response from remote server:'), l)
2064 if resp == 1:
2065 raise util.Abort(_('operation forbidden by server'))
2066 elif resp == 2:
2067 raise util.Abort(_('locking the remote repository failed'))
2068 elif resp != 0:
2069 raise util.Abort(_('the server sent an unknown error code'))
2070 self.ui.status(_('streaming all changes\n'))
2071 l = fp.readline()
2072 try:
2073 total_files, total_bytes = map(int, l.split(' ', 1))
2074 except (ValueError, TypeError):
2075 raise error.ResponseError(
2076 _('Unexpected response from remote server:'), l)
2077 self.ui.status(_('%d files to transfer, %s of data\n') %
2078 (total_files, util.bytecount(total_bytes)))
2079 start = time.time()
2080 for i in xrange(total_files):
2081 # XXX doesn't support '\n' or '\r' in filenames
2082 l = fp.readline()
2083 try:
2084 name, size = l.split('\0', 1)
2085 size = int(size)
2086 except (ValueError, TypeError):
2087 raise error.ResponseError(
2088 _('Unexpected response from remote server:'), l)
2089 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2090 # for backwards compat, name was partially encoded
2091 ofp = self.sopener(store.decodedir(name), 'w')
2092 for chunk in util.filechunkiter(fp, limit=size):
2093 ofp.write(chunk)
2094 ofp.close()
2095 elapsed = time.time() - start
2096 if elapsed <= 0:
2097 elapsed = 0.001
2098 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2099 (util.bytecount(total_bytes), elapsed,
2100 util.bytecount(total_bytes / elapsed)))
2101 self.invalidate()
2102 return len(self.heads()) + 1
2103
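Read together, the parsing above implies this wire format for stream_out (inferred from the client code here, not from a protocol document):

    # <status>\n                      0 = ok, 1 = forbidden, 2 = lock failed
    # <total_files> <total_bytes>\n
    # then, for each of the total_files entries:
    # <store-encoded name>\0<size>\n
    # <size> raw bytes of revlog data (no separator before the next header)
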
2104 def clone(self, remote, heads=[], stream=False):
2105 '''clone remote repository.
2106
2107 keyword arguments:
2108 heads: list of revs to clone (forces use of pull)
2109 stream: use streaming clone if possible'''
2110
2111 # now, all clients that can request uncompressed clones can
2112 # read repo formats supported by all servers that can serve
2113 # them.
2114
2115 # if revlog format changes, client will have to check version
2116 # and format flags on "stream" capability, and use
2117 # uncompressed only if compatible.
2118
2119 if stream and not heads and remote.capable('stream'):
2120 return self.stream_in(remote)
2121 return self.pull(remote, heads)
2122
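Note that clone() only attempts a streaming clone when no specific heads were requested and the server advertises the 'stream' capability; anything else degrades to a normal pull. A hypothetical call site (the destination path and setup are assumptions):

    # remote is a repository proxy exposing capable()/stream_out();
    # dest is a freshly created local repository
    dest = localrepository(ui, '/tmp/copy', create=1)
    dest.clone(remote, stream=True)   # streams if permitted, else pulls
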
2123 # used to avoid circular references so destructors work
2124 def aftertrans(files):
2125 renamefiles = [tuple(t) for t in files]
2126 def a():
2127 for src, dest in renamefiles:
2128 util.rename(src, dest)
2129 return a
2130
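aftertrans() returns a plain closure on purpose: the transaction can hold it as its after-close callback without keeping a reference back to the repository, so destructors still run. Illustration (the file names are assumed):

    after = aftertrans([('journal', 'undo')])
    # ... transaction runs and closes ...
    after()   # performs the deferred rename 'journal' -> 'undo'
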
2120 def instance(ui, path, create):
2131 def instance(ui, path, create):
2121 return localrepository(ui, util.drop_scheme('file', path), create)
2132 return localrepository(ui, util.drop_scheme('file', path), create)
2122
2133
2123 def islocal(path):
2134 def islocal(path):
2124 return True
2135 return True
@@ -1,144 +1,145 @@
1 # repair.py - functions for repository repair for mercurial
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2, incorporated herein by reference.
8
9 import changegroup
10 from node import nullrev, short
11 from i18n import _
12 import os
13
14 def _bundle(repo, bases, heads, node, suffix, extranodes=None):
15 """create a bundle with the specified revisions as a backup"""
16 cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
17 backupdir = repo.join("strip-backup")
18 if not os.path.isdir(backupdir):
19 os.mkdir(backupdir)
20 name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
21 repo.ui.warn(_("saving bundle to %s\n") % name)
22 return changegroup.writebundle(cg, name, "HG10BZ")
23
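_bundle() drops a bzip2-compressed HG10 bundle into the repository's strip-backup directory, named after the short hash of the node being stripped plus the given suffix, for example (hash illustrative):

    # .hg/strip-backup/1e28bd89a133-backup   full safety backup (backup="all")
    # .hg/strip-backup/1e28bd89a133-temp     revisions re-applied after strip
    # either file can be restored manually with: hg unbundle <file>
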
24 def _collectfiles(repo, striprev):
25 """find out the filelogs affected by the strip"""
26 files = set()
27
28 for x in xrange(striprev, len(repo)):
29 files.update(repo[x].files())
30
31 return sorted(files)
32
33 def _collectextranodes(repo, files, link):
34 """return the nodes that have to be saved before the strip"""
35 def collectone(revlog):
36 extra = []
37 startrev = count = len(revlog)
38 # find the truncation point of the revlog
39 for i in xrange(count):
40 lrev = revlog.linkrev(i)
41 if lrev >= link:
42 startrev = i + 1
43 break
44
45 # see if any revision after that point has a linkrev less than link
46 # (we have to save these manually)
47 for i in xrange(startrev, count):
48 node = revlog.node(i)
49 lrev = revlog.linkrev(i)
50 if lrev < link:
51 extra.append((node, cl.node(lrev)))
52
53 return extra
54
55 extranodes = {}
56 cl = repo.changelog
57 extra = collectone(repo.manifest)
58 if extra:
59 extranodes[1] = extra
60 for fname in files:
61 f = repo.file(fname)
62 extra = collectone(f)
63 if extra:
64 extranodes[fname] = extra
65
66 return extranodes
67
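The dictionary returned above keys filelogs by name and uses the integer 1 as a stand-in key for the manifest, which is why changegroupsubset() calls add_extra_nodes(1, ...) and skips int keys when walking file names. Its shape, with illustrative values:

    # extranodes = {
    #     1:       [(manifest_node, linked_changelog_node), ...],
    #     'foo.c': [(filelog_node, linked_changelog_node), ...],
    # }
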
68 def strip(ui, repo, node, backup="all"):
69 cl = repo.changelog
70 # TODO delete the undo files, and handle undo of merge sets
71 striprev = cl.rev(node)
72
73 # Some revisions with rev > striprev may not be descendants of striprev.
74 # We have to find these revisions and put them in a bundle, so that
75 # we can restore them after the truncations.
76 # To create the bundle we use repo.changegroupsubset which requires
77 # the list of heads and bases of the set of interesting revisions.
78 # (head = revision in the set that has no descendant in the set;
79 # base = revision in the set that has no ancestor in the set)
80 tostrip = set((striprev,))
81 saveheads = set()
82 savebases = []
83 for r in xrange(striprev + 1, len(cl)):
84 parents = cl.parentrevs(r)
85 if parents[0] in tostrip or parents[1] in tostrip:
86 # r is a descendant of striprev
87 tostrip.add(r)
88 # if this is a merge and one of the parents does not descend
89 # from striprev, mark that parent as a savehead.
90 if parents[1] != nullrev:
91 for p in parents:
92 if p not in tostrip and p > striprev:
93 saveheads.add(p)
94 else:
95 # if no parents of this revision will be stripped, mark it as
96 # a savebase
97 if parents[0] < striprev and parents[1] < striprev:
98 savebases.append(cl.node(r))
99
100 saveheads.difference_update(parents)
101 saveheads.add(r)
102
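To make the head/base bookkeeping concrete, here is a small worked example (an illustration constructed for this review, not taken from the source):

    # Strip rev 2 (striprev = 2) from this history:
    #
    #     0 -- 1 -- 2 -- 3
    #           \
    #            4
    #
    # r = 3: parent 2 is in tostrip, so rev 3 is stripped as well.
    # r = 4: neither parent is stripped and both are < striprev, so
    #        rev 4 is recorded as a savebase and, since no later
    #        revision claims it as a parent, remains a savehead.
    # The temp bundle built from (savebases, saveheads) therefore
    # carries rev 4 across the truncation.
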
103 saveheads = [cl.node(r) for r in saveheads]
104 files = _collectfiles(repo, striprev)
105
106 extranodes = _collectextranodes(repo, files, striprev)
107
108 # create a changegroup for all the branches we need to keep
109 if backup == "all":
110 _bundle(repo, [node], cl.heads(), node, 'backup')
111 if saveheads or extranodes:
112 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
113 extranodes)
114
115 mfst = repo.manifest
116
117 tr = repo.transaction()
118 offset = len(tr.entries)
119
120 tr.startgroup()
121 cl.strip(striprev, tr)
122 mfst.strip(striprev, tr)
123 for fn in files:
124 repo.file(fn).strip(striprev, tr)
125 tr.endgroup()
126
127 try:
128 for i in xrange(offset, len(tr.entries)):
129 file, troffset, ignore = tr.entries[i]
130 repo.sopener(file, 'a').truncate(troffset)
131 tr.close()
132 except:
133 tr.abort()
134 raise
135
136 if saveheads or extranodes:
137 ui.status(_("adding branch\n"))
138 f = open(chgrpfile, "rb")
139 gen = changegroup.readbundle(f, chgrpfile)
140 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
141 f.close()
142 if backup != "strip":
143 os.unlink(chgrpfile)
144
145 repo.destroyed()
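The new repo.destroyed() call on line 145 is the point of this changeset: strip now tells the repository that history has been destroyed, giving strip and rollback a common place to invalidate caches that stale changesets would otherwise poison (issue548). A minimal sketch of what such a method might do (the body shown is an assumption; the real method is added to localrepository in this changeset):

    def destroyed(self):
        # history was just destroyed (by strip or rollback); drop any
        # cached state derived from the changelog so it is rebuilt
        # lazily the next time it is needed
        self.invalidate()
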