tag: use match.exact for commit
Matt Mackall
r8705:509083f5 default
@@ -1,2135 +1,2136 @@
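This changeset makes localrepository._tag commit the .hgtags file through an exact matcher instead of a bare file list: commit() is now called with files=None and match=match_.exact(self.root, '', ['.hgtags']), so the tag commit is filtered by the matcher. For orientation, an exact matcher matches only the paths it was given, with no pattern expansion. A minimal sketch of that idea follows (illustration only; the real implementation is match.exact in mercurial/match.py):

# sketch of an "exact" matcher, for illustration only -- not
# Mercurial's implementation
def exact(files):
    fileset = set(files)
    def matchfn(fn):
        # no globbing or directory expansion: a path matches
        # only if it was listed explicitly
        return fn in fileset
    return matchfn
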
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if baseui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if baseui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.baseui = baseui
        self.ui = baseui.copy()
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True
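    # __nonzero__ is always true: without it, an empty repository
    # (len(repo) == 0) would evaluate as false in boolean contexts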

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

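    # usage sketch (assuming `repo` is an existing localrepository):
    #   repo[None]   -> context.workingctx for the working directory
    #   repo['tip']  -> context.changectx for the tip revision
    #   len(repo)    -> number of changesets
    #   iter(repo)   -> revision numbers 0 through len(repo) - 1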
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

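        # the change below is the subject of this changeset: build an
        # exact matcher over .hgtags instead of passing a bare file list,
        # so commit() filters via the matcher rather than an explicit list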
-        tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)
+        m = match_.exact(self.root, '', ['.hgtags'])
+        tagnode = self.commit(None, message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        last = {}
        ret = []
        for node in reversed(self.heads()):
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except error.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

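    # lookup() resolves keys in order: integer revision, '.', 'null',
    # 'tip', exact changelog match, tag name, branch name, and finally
    # an unambiguous node-prefix match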
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, files=None, text="", user=None, date=None, match=None,
               force=False, editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory, files and
        match can be used to filter the committed files.
        If editor is supplied, it is called to get a commit message.
        """
        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            if files:
                modified, removed = [], []
                for f in sorted(set(files)):
                    s = self.dirstate[f]
                    if s in 'nma':
                        modified.append(f)
                    elif s == 'r':
                        removed.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
                changes = [modified, [], removed, [], []]
            else:
                changes = self.status(match=match, clean=force)
                if force:
                    changes[0].extend(changes[6])

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                self.ui.status(_("nothing changed\n"))
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                wctx._text = editor(self, wctx,
                                    changes[1], changes[0], changes[2])
            ret = self.commitctx(wctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """
923
924
924 def mfmatches(ctx):
925 def mfmatches(ctx):
925 mf = ctx.manifest().copy()
926 mf = ctx.manifest().copy()
926 for fn in mf.keys():
927 for fn in mf.keys():
927 if not match(fn):
928 if not match(fn):
928 del mf[fn]
929 del mf[fn]
929 return mf
930 return mf
930
931
931 if isinstance(node1, context.changectx):
932 if isinstance(node1, context.changectx):
932 ctx1 = node1
933 ctx1 = node1
933 else:
934 else:
934 ctx1 = self[node1]
935 ctx1 = self[node1]
935 if isinstance(node2, context.changectx):
936 if isinstance(node2, context.changectx):
936 ctx2 = node2
937 ctx2 = node2
937 else:
938 else:
938 ctx2 = self[node2]
939 ctx2 = self[node2]
939
940
940 working = ctx2.rev() is None
941 working = ctx2.rev() is None
941 parentworking = working and ctx1 == self['.']
942 parentworking = working and ctx1 == self['.']
942 match = match or match_.always(self.root, self.getcwd())
943 match = match or match_.always(self.root, self.getcwd())
943 listignored, listclean, listunknown = ignored, clean, unknown
944 listignored, listclean, listunknown = ignored, clean, unknown
944
945
945 # load earliest manifest first for caching reasons
946 # load earliest manifest first for caching reasons
946 if not working and ctx2.rev() < ctx1.rev():
947 if not working and ctx2.rev() < ctx1.rev():
947 ctx2.manifest()
948 ctx2.manifest()
948
949
949 if not parentworking:
950 if not parentworking:
950 def bad(f, msg):
951 def bad(f, msg):
951 if f not in ctx1:
952 if f not in ctx1:
952 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
953 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
953 match.bad = bad
954 match.bad = bad
954
955
955 if working: # we need to scan the working dir
956 if working: # we need to scan the working dir
956 s = self.dirstate.status(match, listignored, listclean, listunknown)
957 s = self.dirstate.status(match, listignored, listclean, listunknown)
957 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
958 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
958
959
959 # check for any possibly clean files
960 # check for any possibly clean files
960 if parentworking and cmp:
961 if parentworking and cmp:
961 fixup = []
962 fixup = []
962 # do a full compare of any files that might have changed
963 # do a full compare of any files that might have changed
963 for f in sorted(cmp):
964 for f in sorted(cmp):
964 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
965 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
965 or ctx1[f].cmp(ctx2[f].data())):
966 or ctx1[f].cmp(ctx2[f].data())):
966 modified.append(f)
967 modified.append(f)
967 else:
968 else:
968 fixup.append(f)
969 fixup.append(f)
969
970
970 if listclean:
971 if listclean:
971 clean += fixup
972 clean += fixup
972
973
973 # update dirstate for files that are actually clean
974 # update dirstate for files that are actually clean
974 if fixup:
975 if fixup:
975 try:
976 try:
976 # updating the dirstate is optional
977 # updating the dirstate is optional
977 # so we don't wait on the lock
978 # so we don't wait on the lock
978 wlock = self.wlock(False)
979 wlock = self.wlock(False)
979 try:
980 try:
980 for f in fixup:
981 for f in fixup:
981 self.dirstate.normal(f)
982 self.dirstate.normal(f)
982 finally:
983 finally:
983 wlock.release()
984 wlock.release()
984 except error.LockError:
985 except error.LockError:
985 pass
986 pass
986
987
987 if not parentworking:
988 if not parentworking:
988 mf1 = mfmatches(ctx1)
989 mf1 = mfmatches(ctx1)
989 if working:
990 if working:
990 # we are comparing working dir against non-parent
991 # we are comparing working dir against non-parent
991 # generate a pseudo-manifest for the working dir
992 # generate a pseudo-manifest for the working dir
992 mf2 = mfmatches(self['.'])
993 mf2 = mfmatches(self['.'])
993 for f in cmp + modified + added:
994 for f in cmp + modified + added:
994 mf2[f] = None
995 mf2[f] = None
995 mf2.set(f, ctx2.flags(f))
996 mf2.set(f, ctx2.flags(f))
996 for f in removed:
997 for f in removed:
997 if f in mf2:
998 if f in mf2:
998 del mf2[f]
999 del mf2[f]
999 else:
1000 else:
1000 # we are comparing two revisions
1001 # we are comparing two revisions
1001 deleted, unknown, ignored = [], [], []
1002 deleted, unknown, ignored = [], [], []
1002 mf2 = mfmatches(ctx2)
1003 mf2 = mfmatches(ctx2)
1003
1004
1004 modified, added, clean = [], [], []
1005 modified, added, clean = [], [], []
1005 for fn in mf2:
1006 for fn in mf2:
1006 if fn in mf1:
1007 if fn in mf1:
1007 if (mf1.flags(fn) != mf2.flags(fn) or
1008 if (mf1.flags(fn) != mf2.flags(fn) or
1008 (mf1[fn] != mf2[fn] and
1009 (mf1[fn] != mf2[fn] and
1009 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1010 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1010 modified.append(fn)
1011 modified.append(fn)
1011 elif listclean:
1012 elif listclean:
1012 clean.append(fn)
1013 clean.append(fn)
1013 del mf1[fn]
1014 del mf1[fn]
1014 else:
1015 else:
1015 added.append(fn)
1016 added.append(fn)
1016 removed = mf1.keys()
1017 removed = mf1.keys()
1017
1018
1018 r = modified, added, removed, deleted, unknown, ignored, clean
1019 r = modified, added, removed, deleted, unknown, ignored, clean
1019 [l.sort() for l in r]
1020 [l.sort() for l in r]
1020 return r
1021 return r
1021
1022
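    # Illustrative usage sketch (not part of the original module; 'repo'
    # and the path are hypothetical): callers unpack the seven sorted
    # lists returned by status().
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(node1='.', node2=None, unknown=True)
    #   for f in modified:
    #       print 'M %s' % f
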
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

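    # Note (editorial assumption, not from the original source): the
    # single-letter dirstate states tested above are conventionally
    # 'n' = normal (tracked), 'a' = added, 'r' = removed, 'm' = merged
    # from the second parent, and '?' = untracked.  A hypothetical check:
    #
    #   if repo.dirstate['some/file'] == '?':
    #       repo.add(['some/file'])
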
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None, closed=False):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in sorted(heads)]

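    # Illustrative sketch (not from the original source): negating the
    # revision number lets a plain ascending sort yield descending output
    # without a custom comparator:
    #
    #   revs = [3, 11, 7]
    #   pairs = sorted((-r, r) for r in revs)
    #   assert [r for (neg, r) in pairs] == [11, 7, 3]
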
    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

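    # Worked example (illustrative, not from the original source): the
    # i == f / f *= 2 bookkeeping above samples the chain from top towards
    # bottom at exponentially spaced positions 1, 2, 4, 8, ..., so a chain
    # of n ancestors is summarized by about log2(n) nodes.  The discovery
    # code in findcommonincoming() uses these samples to binary-search for
    # the first unknown changeset.
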
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

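    # Protocol sketch (editorial summary, hypothetical nodes): for an
    # unknown remote head, remote.branches() returns linear segments as
    # (head, root, p1, p2) tuples.  Segments whose root is already known
    # locally become (head, root) pairs in 'search'; remote.between() then
    # returns the exponentially spaced samples produced by between(), and
    # each pass narrows the span until the earliest unknown changeset is
    # isolated and added to 'fetch'.
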
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

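    # Illustrative summary (not from the original source): the pruning loop
    # above removes base and all of its ancestors from 'remain', leaving
    # exactly the changesets the remote lacks; 'subset' then keeps only the
    # nodes whose parents were both pruned, i.e. the roots of the outgoing
    # set.
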
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

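    # Hypothetical usage (illustrative only; 'other' stands for any peer
    # repository object, e.g. one obtained via mercurial.hg.repository):
    #
    #   repo.pull(other)                    # pull everything new
    #   repo.pull(other, heads=[somenode])  # pull up to a specific head
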
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

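    # Editorial note (derived from the code above, not original text):
    # prepush() has a three-way return contract that both push paths rely
    # on:
    #
    #   (None, 1)             -> nothing to push; treated as success
    #   (None, 0)             -> push refused (would create new heads)
    #   (changegroup, rheads) -> bundle to send, plus the remote heads the
    #                            server should still have when it applies it
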
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

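        # Illustrative shape of the extranodes argument described above
        # (hypothetical values; the nodes are 20-byte binary ids):
        #
        #   extranodes = {
        #       1: [(manifestnode, linknode)],           # manifest entries
        #       'path/to/file': [(filenode, linknode)],  # filelog entries
        #   }
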
        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

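        # Illustrative usage (not from the original source): the generated
        # comparator plugs directly into Python 2's list.sort(cmp=...):
        #
        #   nodes.sort(cmp_by_rev_func(self.changelog))
        #
        # which orders nodes by ascending revision number in that revlog.
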
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

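        # Worked example (illustrative): if the recipient provably has node
        # n, the walk above also marks every ancestor of n as had, and the
        # final loop drops all of them from msngset, so nothing the
        # recipient already has is re-sent.
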
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

1772 # We have a list of filenodes we think we need for a file, lets remove
1773 # We have a list of filenodes we think we need for a file, lets remove
1773 # all those we know the recipient must have.
1774 # all those we know the recipient must have.
1774 def prune_filenodes(f, filerevlog):
1775 def prune_filenodes(f, filerevlog):
1775 msngset = msng_filenode_set[f]
1776 msngset = msng_filenode_set[f]
1776 hasset = set()
1777 hasset = set()
1777 # If a 'missing' filenode thinks it belongs to a changenode we
1778 # If a 'missing' filenode thinks it belongs to a changenode we
1778 # assume the recipient must have, then the recipient must have
1779 # assume the recipient must have, then the recipient must have
1779 # that filenode.
1780 # that filenode.
1780 for n in msngset:
1781 for n in msngset:
1781 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1782 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1782 if clnode in has_cl_set:
1783 if clnode in has_cl_set:
1783 hasset.add(n)
1784 hasset.add(n)
1784 prune_parents(filerevlog, hasset, msngset)
1785 prune_parents(filerevlog, hasset, msngset)
1785
1786
1786 # A function generator function that sets up the a context for the
1787 # A function generator function that sets up the a context for the
1787 # inner function.
1788 # inner function.
1788 def lookup_filenode_link_func(fname):
1789 def lookup_filenode_link_func(fname):
1789 msngset = msng_filenode_set[fname]
1790 msngset = msng_filenode_set[fname]
1790 # Lookup the changenode the filenode belongs to.
1791 # Lookup the changenode the filenode belongs to.
1791 def lookup_filenode_link(fnode):
1792 def lookup_filenode_link(fnode):
1792 return msngset[fnode]
1793 return msngset[fnode]
1793 return lookup_filenode_link
1794 return lookup_filenode_link
1794
1795
1795 # Add the nodes that were explicitly requested.
1796 # Add the nodes that were explicitly requested.
1796 def add_extra_nodes(name, nodes):
1797 def add_extra_nodes(name, nodes):
1797 if not extranodes or name not in extranodes:
1798 if not extranodes or name not in extranodes:
1798 return
1799 return
1799
1800
1800 for node, linknode in extranodes[name]:
1801 for node, linknode in extranodes[name]:
1801 if node not in nodes:
1802 if node not in nodes:
1802 nodes[node] = linknode
1803 nodes[node] = linknode
1803
1804
1804 # Now that we have all theses utility functions to help out and
1805 # Now that we have all theses utility functions to help out and
1805 # logically divide up the task, generate the group.
1806 # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
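
    # For orientation: the stream gengroup() yields above is a sequence of
    # length-prefixed chunks -- the changelog group, the manifest group, then
    # a (filename header, filelog group) pair per changed file -- with an
    # empty chunk terminating each group and the stream itself.  A minimal
    # sketch of the framing, assuming the struct-based encoding used by the
    # changegroup module of this era:
    #
    #   import struct
    #   def chunkheader(datalen):
    #       # 4-byte big-endian length that counts the header itself
    #       return struct.pack(">l", datalen + 4)
    #   def closechunk():
    #       return struct.pack(">l", 0)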

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
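        # findmissing(common) walks from our heads and returns, in
        # topological order, every changelog node that is not an ancestor of
        # a node in common; e.g. for the linear history 0 -> 1 -> 2 with
        # common = [node(1)], nodes == [node(2)] and revset == set([2]).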
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
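
    # util.chunkbuffer wraps the generator in a file-like object so protocol
    # code can read() arbitrary byte counts regardless of chunk boundaries.
    # A sketch of a consumer, with sendtoremote() as a hypothetical callback:
    #
    #   cg = repo._changegroup(common, 'push')
    #   while True:
    #       data = cg.read(4096)
    #       if not data:
    #           break
    #       sendtoremote(data)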

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
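
            # At this point the stream has delivered, for each changed file,
            # a chunk carrying the filename followed by that file's delta
            # group; the empty chunk that breaks the loop above marks the
            # end of the changegroup.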

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote):
        fp = remote.stream_out()
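        # Wire format parsed below: one ASCII status line (0 = ok,
        # 1 = operation forbidden, 2 = lock failed), then a line with
        # "<total_files> <total_bytes>", then for each file a header line
        # of the form "<store path>\0<size>" followed by exactly <size>
        # raw bytes of revlog data.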
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
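
        # (Reached via hg.clone(); in this era the --uncompressed flag on
        # 'hg clone' is what requests stream=True.)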

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
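
# A sketch of the intended use, with hypothetical paths: the transaction
# keeps only the returned closure (not the repo), and invokes it after a
# successful close to promote the journal to an undo file:
#
#   renames = [("journal", "undo"), ("journal.dirstate", "undo.dirstate")]
#   tr = transaction.transaction(ui.warn, opener, "journal",
#                                aftertrans(renames))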

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True