##// END OF EJS Templates
configitems: register the 'format.obsstore-version' config
marmoute -
r33241:fd50788a default
parent child Browse files
Show More
@@ -1,189 +1,192 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 )
14 )
15
15
16 def loadconfigtable(ui, extname, configtable):
16 def loadconfigtable(ui, extname, configtable):
17 """update config item known to the ui with the extension ones"""
17 """update config item known to the ui with the extension ones"""
18 for section, items in configtable.items():
18 for section, items in configtable.items():
19 knownitems = ui._knownconfig.setdefault(section, {})
19 knownitems = ui._knownconfig.setdefault(section, {})
20 knownkeys = set(knownitems)
20 knownkeys = set(knownitems)
21 newkeys = set(items)
21 newkeys = set(items)
22 for key in sorted(knownkeys & newkeys):
22 for key in sorted(knownkeys & newkeys):
23 msg = "extension '%s' overwrite config item '%s.%s'"
23 msg = "extension '%s' overwrite config item '%s.%s'"
24 msg %= (extname, section, key)
24 msg %= (extname, section, key)
25 ui.develwarn(msg, config='warn-config')
25 ui.develwarn(msg, config='warn-config')
26
26
27 knownitems.update(items)
27 knownitems.update(items)
28
28
29 class configitem(object):
29 class configitem(object):
30 """represent a known config item
30 """represent a known config item
31
31
32 :section: the official config section where to find this item,
32 :section: the official config section where to find this item,
33 :name: the official name within the section,
33 :name: the official name within the section,
34 :default: default value for this item,
34 :default: default value for this item,
35 """
35 """
36
36
37 def __init__(self, section, name, default=None):
37 def __init__(self, section, name, default=None):
38 self.section = section
38 self.section = section
39 self.name = name
39 self.name = name
40 self.default = default
40 self.default = default
41
41
42 coreitems = {}
42 coreitems = {}
43
43
44 def _register(configtable, *args, **kwargs):
44 def _register(configtable, *args, **kwargs):
45 item = configitem(*args, **kwargs)
45 item = configitem(*args, **kwargs)
46 section = configtable.setdefault(item.section, {})
46 section = configtable.setdefault(item.section, {})
47 if item.name in section:
47 if item.name in section:
48 msg = "duplicated config item registration for '%s.%s'"
48 msg = "duplicated config item registration for '%s.%s'"
49 raise error.ProgrammingError(msg % (item.section, item.name))
49 raise error.ProgrammingError(msg % (item.section, item.name))
50 section[item.name] = item
50 section[item.name] = item
51
51
52 # Registering actual config items
52 # Registering actual config items
53
53
54 def getitemregister(configtable):
54 def getitemregister(configtable):
55 return functools.partial(_register, configtable)
55 return functools.partial(_register, configtable)
56
56
57 coreconfigitem = getitemregister(coreitems)
57 coreconfigitem = getitemregister(coreitems)
58
58
59 coreconfigitem('auth', 'cookiefile',
59 coreconfigitem('auth', 'cookiefile',
60 default=None,
60 default=None,
61 )
61 )
62 # bookmarks.pushing: internal hack for discovery
62 # bookmarks.pushing: internal hack for discovery
63 coreconfigitem('bookmarks', 'pushing',
63 coreconfigitem('bookmarks', 'pushing',
64 default=list,
64 default=list,
65 )
65 )
66 # bundle.mainreporoot: internal hack for bundlerepo
66 # bundle.mainreporoot: internal hack for bundlerepo
67 coreconfigitem('bundle', 'mainreporoot',
67 coreconfigitem('bundle', 'mainreporoot',
68 default='',
68 default='',
69 )
69 )
70 # bundle.reorder: experimental config
70 # bundle.reorder: experimental config
71 coreconfigitem('bundle', 'reorder',
71 coreconfigitem('bundle', 'reorder',
72 default='auto',
72 default='auto',
73 )
73 )
74 coreconfigitem('color', 'mode',
74 coreconfigitem('color', 'mode',
75 default='auto',
75 default='auto',
76 )
76 )
77 coreconfigitem('devel', 'all-warnings',
77 coreconfigitem('devel', 'all-warnings',
78 default=False,
78 default=False,
79 )
79 )
80 coreconfigitem('devel', 'bundle2.debug',
80 coreconfigitem('devel', 'bundle2.debug',
81 default=False,
81 default=False,
82 )
82 )
83 coreconfigitem('devel', 'check-locks',
83 coreconfigitem('devel', 'check-locks',
84 default=False,
84 default=False,
85 )
85 )
86 coreconfigitem('devel', 'check-relroot',
86 coreconfigitem('devel', 'check-relroot',
87 default=False,
87 default=False,
88 )
88 )
89 coreconfigitem('devel', 'disableloaddefaultcerts',
89 coreconfigitem('devel', 'disableloaddefaultcerts',
90 default=False,
90 default=False,
91 )
91 )
92 coreconfigitem('devel', 'legacy.exchange',
92 coreconfigitem('devel', 'legacy.exchange',
93 default=list,
93 default=list,
94 )
94 )
95 coreconfigitem('devel', 'servercafile',
95 coreconfigitem('devel', 'servercafile',
96 default='',
96 default='',
97 )
97 )
98 coreconfigitem('devel', 'serverexactprotocol',
98 coreconfigitem('devel', 'serverexactprotocol',
99 default='',
99 default='',
100 )
100 )
101 coreconfigitem('devel', 'serverrequirecert',
101 coreconfigitem('devel', 'serverrequirecert',
102 default=False,
102 default=False,
103 )
103 )
104 coreconfigitem('devel', 'strip-obsmarkers',
104 coreconfigitem('devel', 'strip-obsmarkers',
105 default=True,
105 default=True,
106 )
106 )
107 coreconfigitem('format', 'aggressivemergedeltas',
107 coreconfigitem('format', 'aggressivemergedeltas',
108 default=False,
108 default=False,
109 )
109 )
110 coreconfigitem('format', 'chunkcachesize',
110 coreconfigitem('format', 'chunkcachesize',
111 default=None,
111 default=None,
112 )
112 )
113 coreconfigitem('format', 'dotencode',
113 coreconfigitem('format', 'dotencode',
114 default=True,
114 default=True,
115 )
115 )
116 coreconfigitem('format', 'generaldelta',
116 coreconfigitem('format', 'generaldelta',
117 default=False,
117 default=False,
118 )
118 )
119 coreconfigitem('format', 'manifestcachesize',
119 coreconfigitem('format', 'manifestcachesize',
120 default=None,
120 default=None,
121 )
121 )
122 coreconfigitem('format', 'maxchainlen',
122 coreconfigitem('format', 'maxchainlen',
123 default=None,
123 default=None,
124 )
124 )
125 coreconfigitem('format', 'obsstore-version',
126 default=None,
127 )
125 coreconfigitem('hostsecurity', 'ciphers',
128 coreconfigitem('hostsecurity', 'ciphers',
126 default=None,
129 default=None,
127 )
130 )
128 coreconfigitem('hostsecurity', 'disabletls10warning',
131 coreconfigitem('hostsecurity', 'disabletls10warning',
129 default=False,
132 default=False,
130 )
133 )
131 coreconfigitem('patch', 'eol',
134 coreconfigitem('patch', 'eol',
132 default='strict',
135 default='strict',
133 )
136 )
134 coreconfigitem('patch', 'fuzz',
137 coreconfigitem('patch', 'fuzz',
135 default=2,
138 default=2,
136 )
139 )
137 coreconfigitem('server', 'bundle1',
140 coreconfigitem('server', 'bundle1',
138 default=True,
141 default=True,
139 )
142 )
140 coreconfigitem('server', 'bundle1gd',
143 coreconfigitem('server', 'bundle1gd',
141 default=None,
144 default=None,
142 )
145 )
143 coreconfigitem('server', 'compressionengines',
146 coreconfigitem('server', 'compressionengines',
144 default=list,
147 default=list,
145 )
148 )
146 coreconfigitem('server', 'concurrent-push-mode',
149 coreconfigitem('server', 'concurrent-push-mode',
147 default='strict',
150 default='strict',
148 )
151 )
149 coreconfigitem('server', 'disablefullbundle',
152 coreconfigitem('server', 'disablefullbundle',
150 default=False,
153 default=False,
151 )
154 )
152 coreconfigitem('server', 'maxhttpheaderlen',
155 coreconfigitem('server', 'maxhttpheaderlen',
153 default=1024,
156 default=1024,
154 )
157 )
155 coreconfigitem('server', 'preferuncompressed',
158 coreconfigitem('server', 'preferuncompressed',
156 default=False,
159 default=False,
157 )
160 )
158 coreconfigitem('server', 'uncompressedallowsecret',
161 coreconfigitem('server', 'uncompressedallowsecret',
159 default=False,
162 default=False,
160 )
163 )
161 coreconfigitem('server', 'validate',
164 coreconfigitem('server', 'validate',
162 default=False,
165 default=False,
163 )
166 )
164 coreconfigitem('server', 'zliblevel',
167 coreconfigitem('server', 'zliblevel',
165 default=-1,
168 default=-1,
166 )
169 )
167 coreconfigitem('ui', 'clonebundleprefers',
170 coreconfigitem('ui', 'clonebundleprefers',
168 default=list,
171 default=list,
169 )
172 )
170 coreconfigitem('ui', 'interactive',
173 coreconfigitem('ui', 'interactive',
171 default=None,
174 default=None,
172 )
175 )
173 coreconfigitem('ui', 'quiet',
176 coreconfigitem('ui', 'quiet',
174 default=False,
177 default=False,
175 )
178 )
176 # Windows defaults to a limit of 512 open files. A buffer of 128
179 # Windows defaults to a limit of 512 open files. A buffer of 128
177 # should give us enough headway.
180 # should give us enough headway.
178 coreconfigitem('worker', 'backgroundclosemaxqueue',
181 coreconfigitem('worker', 'backgroundclosemaxqueue',
179 default=384,
182 default=384,
180 )
183 )
181 coreconfigitem('worker', 'backgroundcloseminfilecount',
184 coreconfigitem('worker', 'backgroundcloseminfilecount',
182 default=2048,
185 default=2048,
183 )
186 )
184 coreconfigitem('worker', 'backgroundclosethreadcount',
187 coreconfigitem('worker', 'backgroundclosethreadcount',
185 default=4,
188 default=4,
186 )
189 )
187 coreconfigitem('worker', 'numcpus',
190 coreconfigitem('worker', 'numcpus',
188 default=None,
191 default=None,
189 )
192 )
@@ -1,1031 +1,1031 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "precursor markers of Y" because they hold
27 a successor are called "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depends on the version. See
66 The header is followed by the markers. Marker format depends on the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import struct
73 import struct
74
74
75 from .i18n import _
75 from .i18n import _
76 from . import (
76 from . import (
77 error,
77 error,
78 node,
78 node,
79 obsutil,
79 obsutil,
80 phases,
80 phases,
81 policy,
81 policy,
82 util,
82 util,
83 )
83 )
84
84
85 parsers = policy.importmod(r'parsers')
85 parsers = policy.importmod(r'parsers')
86
86
87 _pack = struct.pack
87 _pack = struct.pack
88 _unpack = struct.unpack
88 _unpack = struct.unpack
89 _calcsize = struct.calcsize
89 _calcsize = struct.calcsize
90 propertycache = util.propertycache
90 propertycache = util.propertycache
91
91
92 # the obsolete feature is not mature enough to be enabled by default.
92 # the obsolete feature is not mature enough to be enabled by default.
93 # you have to rely on a third party extension to enable this.
93 # you have to rely on a third party extension to enable this.
94 _enabled = False
94 _enabled = False
95
95
96 # Options for obsolescence
96 # Options for obsolescence
97 createmarkersopt = 'createmarkers'
97 createmarkersopt = 'createmarkers'
98 allowunstableopt = 'allowunstable'
98 allowunstableopt = 'allowunstable'
99 exchangeopt = 'exchange'
99 exchangeopt = 'exchange'
100
100
101 def isenabled(repo, option):
101 def isenabled(repo, option):
102 """Returns True if the given repository has the given obsolete option
102 """Returns True if the given repository has the given obsolete option
103 enabled.
103 enabled.
104 """
104 """
105 result = set(repo.ui.configlist('experimental', 'evolution'))
105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 if 'all' in result:
106 if 'all' in result:
107 return True
107 return True
108
108
109 # For migration purposes, temporarily return true if the config hasn't been
109 # For migration purposes, temporarily return true if the config hasn't been
110 # set but _enabled is true.
110 # set but _enabled is true.
111 if len(result) == 0 and _enabled:
111 if len(result) == 0 and _enabled:
112 return True
112 return True
113
113
114 # createmarkers must be enabled if other options are enabled
114 # createmarkers must be enabled if other options are enabled
115 if ((allowunstableopt in result or exchangeopt in result) and
115 if ((allowunstableopt in result or exchangeopt in result) and
116 not createmarkersopt in result):
116 not createmarkersopt in result):
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 "if other obsolete options are enabled"))
118 "if other obsolete options are enabled"))
119
119
120 return option in result
120 return option in result
121
121
122 ### obsolescence marker flag
122 ### obsolescence marker flag
123
123
124 ## bumpedfix flag
124 ## bumpedfix flag
125 #
125 #
126 # When a changeset A' succeeds to a changeset A which became public, we call A'
126 # When a changeset A' succeeds to a changeset A which became public, we call A'
127 # "bumped" because it's a successor of a public changeset
127 # "bumped" because it's a successor of a public changeset
128 #
128 #
129 # o A' (bumped)
129 # o A' (bumped)
130 # |`:
130 # |`:
131 # | o A
131 # | o A
132 # |/
132 # |/
133 # o Z
133 # o Z
134 #
134 #
135 # The way to solve this situation is to create a new changeset Ad as a child
135 # The way to solve this situation is to create a new changeset Ad as a child
136 # of A. This changeset has the same content as A'. So the diff from A to A'
136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 #
138 #
139 # o Ad
139 # o Ad
140 # |`:
140 # |`:
141 # | x A'
141 # | x A'
142 # |'|
142 # |'|
143 # o | A
143 # o | A
144 # |/
144 # |/
145 # o Z
145 # o Z
146 #
146 #
147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 # This flag means that the successors express the changes between the public and
149 # This flag means that the successors express the changes between the public and
150 # bumped version and fix the situation, breaking the transitivity of
150 # bumped version and fix the situation, breaking the transitivity of
151 # "bumped" here.
151 # "bumped" here.
152 bumpedfix = 1
152 bumpedfix = 1
153 usingsha256 = 2
153 usingsha256 = 2
154
154
155 ## Parsing and writing of version "0"
155 ## Parsing and writing of version "0"
156 #
156 #
157 # The header is followed by the markers. Each marker is made of:
157 # The header is followed by the markers. Each marker is made of:
158 #
158 #
159 # - 1 uint8 : number of new changesets "N", can be zero.
159 # - 1 uint8 : number of new changesets "N", can be zero.
160 #
160 #
161 # - 1 uint32: metadata size "M" in bytes.
161 # - 1 uint32: metadata size "M" in bytes.
162 #
162 #
163 # - 1 byte: a bit field. It is reserved for flags used in common
163 # - 1 byte: a bit field. It is reserved for flags used in common
164 # obsolete marker operations, to avoid repeated decoding of metadata
164 # obsolete marker operations, to avoid repeated decoding of metadata
165 # entries.
165 # entries.
166 #
166 #
167 # - 20 bytes: obsoleted changeset identifier.
167 # - 20 bytes: obsoleted changeset identifier.
168 #
168 #
169 # - N*20 bytes: new changesets identifiers.
169 # - N*20 bytes: new changesets identifiers.
170 #
170 #
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 # string contains a key and a value, separated by a colon ':', without
172 # string contains a key and a value, separated by a colon ':', without
173 # additional encoding. Keys cannot contain '\0' or ':' and values
173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 # cannot contain '\0'.
174 # cannot contain '\0'.
175 _fm0version = 0
175 _fm0version = 0
176 _fm0fixed = '>BIB20s'
176 _fm0fixed = '>BIB20s'
177 _fm0node = '20s'
177 _fm0node = '20s'
178 _fm0fsize = _calcsize(_fm0fixed)
178 _fm0fsize = _calcsize(_fm0fixed)
179 _fm0fnodesize = _calcsize(_fm0node)
179 _fm0fnodesize = _calcsize(_fm0node)
180
180
181 def _fm0readmarkers(data, off):
181 def _fm0readmarkers(data, off):
182 # Loop on markers
182 # Loop on markers
183 l = len(data)
183 l = len(data)
184 while off + _fm0fsize <= l:
184 while off + _fm0fsize <= l:
185 # read fixed part
185 # read fixed part
186 cur = data[off:off + _fm0fsize]
186 cur = data[off:off + _fm0fsize]
187 off += _fm0fsize
187 off += _fm0fsize
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 # read replacement
189 # read replacement
190 sucs = ()
190 sucs = ()
191 if numsuc:
191 if numsuc:
192 s = (_fm0fnodesize * numsuc)
192 s = (_fm0fnodesize * numsuc)
193 cur = data[off:off + s]
193 cur = data[off:off + s]
194 sucs = _unpack(_fm0node * numsuc, cur)
194 sucs = _unpack(_fm0node * numsuc, cur)
195 off += s
195 off += s
196 # read metadata
196 # read metadata
197 # (metadata will be decoded on demand)
197 # (metadata will be decoded on demand)
198 metadata = data[off:off + mdsize]
198 metadata = data[off:off + mdsize]
199 if len(metadata) != mdsize:
199 if len(metadata) != mdsize:
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 'short, %d bytes expected, got %d')
201 'short, %d bytes expected, got %d')
202 % (mdsize, len(metadata)))
202 % (mdsize, len(metadata)))
203 off += mdsize
203 off += mdsize
204 metadata = _fm0decodemeta(metadata)
204 metadata = _fm0decodemeta(metadata)
205 try:
205 try:
206 when, offset = metadata.pop('date', '0 0').split(' ')
206 when, offset = metadata.pop('date', '0 0').split(' ')
207 date = float(when), int(offset)
207 date = float(when), int(offset)
208 except ValueError:
208 except ValueError:
209 date = (0., 0)
209 date = (0., 0)
210 parents = None
210 parents = None
211 if 'p2' in metadata:
211 if 'p2' in metadata:
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 elif 'p1' in metadata:
213 elif 'p1' in metadata:
214 parents = (metadata.pop('p1', None),)
214 parents = (metadata.pop('p1', None),)
215 elif 'p0' in metadata:
215 elif 'p0' in metadata:
216 parents = ()
216 parents = ()
217 if parents is not None:
217 if parents is not None:
218 try:
218 try:
219 parents = tuple(node.bin(p) for p in parents)
219 parents = tuple(node.bin(p) for p in parents)
220 # if parent content is not a nodeid, drop the data
220 # if parent content is not a nodeid, drop the data
221 for p in parents:
221 for p in parents:
222 if len(p) != 20:
222 if len(p) != 20:
223 parents = None
223 parents = None
224 break
224 break
225 except TypeError:
225 except TypeError:
226 # if content cannot be translated to nodeid drop the data.
226 # if content cannot be translated to nodeid drop the data.
227 parents = None
227 parents = None
228
228
229 metadata = tuple(sorted(metadata.iteritems()))
229 metadata = tuple(sorted(metadata.iteritems()))
230
230
231 yield (pre, sucs, flags, metadata, date, parents)
231 yield (pre, sucs, flags, metadata, date, parents)
232
232
233 def _fm0encodeonemarker(marker):
233 def _fm0encodeonemarker(marker):
234 pre, sucs, flags, metadata, date, parents = marker
234 pre, sucs, flags, metadata, date, parents = marker
235 if flags & usingsha256:
235 if flags & usingsha256:
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 metadata = dict(metadata)
237 metadata = dict(metadata)
238 time, tz = date
238 time, tz = date
239 metadata['date'] = '%r %i' % (time, tz)
239 metadata['date'] = '%r %i' % (time, tz)
240 if parents is not None:
240 if parents is not None:
241 if not parents:
241 if not parents:
242 # mark that we explicitly recorded no parents
242 # mark that we explicitly recorded no parents
243 metadata['p0'] = ''
243 metadata['p0'] = ''
244 for i, p in enumerate(parents, 1):
244 for i, p in enumerate(parents, 1):
245 metadata['p%i' % i] = node.hex(p)
245 metadata['p%i' % i] = node.hex(p)
246 metadata = _fm0encodemeta(metadata)
246 metadata = _fm0encodemeta(metadata)
247 numsuc = len(sucs)
247 numsuc = len(sucs)
248 format = _fm0fixed + (_fm0node * numsuc)
248 format = _fm0fixed + (_fm0node * numsuc)
249 data = [numsuc, len(metadata), flags, pre]
249 data = [numsuc, len(metadata), flags, pre]
250 data.extend(sucs)
250 data.extend(sucs)
251 return _pack(format, *data) + metadata
251 return _pack(format, *data) + metadata
252
252
253 def _fm0encodemeta(meta):
253 def _fm0encodemeta(meta):
254 """Return encoded metadata string to string mapping.
254 """Return encoded metadata string to string mapping.
255
255
256 Assume no ':' in key and no '\0' in both key and value."""
256 Assume no ':' in key and no '\0' in both key and value."""
257 for key, value in meta.iteritems():
257 for key, value in meta.iteritems():
258 if ':' in key or '\0' in key:
258 if ':' in key or '\0' in key:
259 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 raise ValueError("':' and '\0' are forbidden in metadata key'")
260 if '\0' in value:
260 if '\0' in value:
261 raise ValueError("':' is forbidden in metadata value'")
261 raise ValueError("':' is forbidden in metadata value'")
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263
263
264 def _fm0decodemeta(data):
264 def _fm0decodemeta(data):
265 """Return string to string dictionary from encoded version."""
265 """Return string to string dictionary from encoded version."""
266 d = {}
266 d = {}
267 for l in data.split('\0'):
267 for l in data.split('\0'):
268 if l:
268 if l:
269 key, value = l.split(':')
269 key, value = l.split(':')
270 d[key] = value
270 d[key] = value
271 return d
271 return d
272
272
273 ## Parsing and writing of version "1"
273 ## Parsing and writing of version "1"
274 #
274 #
275 # The header is followed by the markers. Each marker is made of:
275 # The header is followed by the markers. Each marker is made of:
276 #
276 #
277 # - uint32: total size of the marker (including this field)
277 # - uint32: total size of the marker (including this field)
278 #
278 #
279 # - float64: date in seconds since epoch
279 # - float64: date in seconds since epoch
280 #
280 #
281 # - int16: timezone offset in minutes
281 # - int16: timezone offset in minutes
282 #
282 #
283 # - uint16: a bit field. It is reserved for flags used in common
283 # - uint16: a bit field. It is reserved for flags used in common
284 # obsolete marker operations, to avoid repeated decoding of metadata
284 # obsolete marker operations, to avoid repeated decoding of metadata
285 # entries.
285 # entries.
286 #
286 #
287 # - uint8: number of successors "N", can be zero.
287 # - uint8: number of successors "N", can be zero.
288 #
288 #
289 # - uint8: number of parents "P", can be zero.
289 # - uint8: number of parents "P", can be zero.
290 #
290 #
291 # 0: parents data stored but no parent,
291 # 0: parents data stored but no parent,
292 # 1: one parent stored,
292 # 1: one parent stored,
293 # 2: two parents stored,
293 # 2: two parents stored,
294 # 3: no parent data stored
294 # 3: no parent data stored
295 #
295 #
296 # - uint8: number of metadata entries M
296 # - uint8: number of metadata entries M
297 #
297 #
298 # - 20 or 32 bytes: precursor changeset identifier.
298 # - 20 or 32 bytes: precursor changeset identifier.
299 #
299 #
300 # - N*(20 or 32) bytes: successors changesets identifiers.
300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 #
301 #
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 #
303 #
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 #
305 #
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 _fm1version = 1
307 _fm1version = 1
308 _fm1fixed = '>IdhHBBB20s'
308 _fm1fixed = '>IdhHBBB20s'
309 _fm1nodesha1 = '20s'
309 _fm1nodesha1 = '20s'
310 _fm1nodesha256 = '32s'
310 _fm1nodesha256 = '32s'
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 _fm1fsize = _calcsize(_fm1fixed)
313 _fm1fsize = _calcsize(_fm1fixed)
314 _fm1parentnone = 3
314 _fm1parentnone = 3
315 _fm1parentshift = 14
315 _fm1parentshift = 14
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 _fm1metapair = 'BB'
317 _fm1metapair = 'BB'
318 _fm1metapairsize = _calcsize('BB')
318 _fm1metapairsize = _calcsize('BB')
319
319
def _fm1purereadmarkers(data, off):
    """Pure-python decoder for "fm1" binary obsolescence markers.

    Yields one tuple per record found in ``data`` starting at offset
    ``off``: ``(prec, sucs, flags, metadata, (secs, tz * 60), parents)``.
    ``tz`` is stored on disk in minutes and converted back here.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                # parents were not recorded for this marker
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                # parents were not recorded for this marker
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the array of (key length, value length)
        # byte pairs, then the concatenated key/value payloads
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394
394
def _fm1encodeonemarker(marker):
    """Encode a single marker tuple in the "fm1" binary format.

    Returns the raw byte string for one record: a fixed header, the
    precursor/successor/parent nodes, the metadata length pairs, then the
    metadata key/value payloads.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size (a flag selects sha256-sized node fields)
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # first slot is the total record size, backpatched below once known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata payloads follow the packed header, keys and values interleaved
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
430
430
def _fm1readmarkers(data, off):
    """Decode fm1 markers from ``data`` starting at offset ``off``.

    Uses the C parser when available, falling back to the pure-python
    decoder otherwise.
    """
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, len(data) - _fm1fsize)
    return _fm1purereadmarkers(data, off)
437
437
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoders take (data, offset) and return an iterable of marker tuples;
# encoders take a single marker tuple and return its binary form
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442
442
def _readmarkerversion(data):
    """Return the format version encoded in the first byte of ``data``."""
    (version,) = _unpack('>B', data[0:1])
    return version
445
445
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    # markers start right after the one-byte version header
    off = 1
    return diskversion, formats[diskversion][0](data, off)
455
455
def encodeheader(version=_fm0version):
    """Return the one-byte binary header advertising ``version``."""
    header = _pack('>B', version)
    return header
458
458
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of ``markers`` in the given format.

    Kept separate from flushmarkers(); it will be reused for markers
    exchange.  When ``addheader`` is true the version header is emitted
    first.
    """
    encoder = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encoder(marker)
467
467
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` by their precursor node into ``successors``."""
    for mark in markers:
        precnode = mark[0]
        successors.setdefault(precnode, set()).add(mark)
472
472
@util.nogc
def _addprecursors(precursors, markers):
    """Index ``markers`` by each of their successor nodes into ``precursors``."""
    for mark in markers:
        for successor in mark[1]:
            precursors.setdefault(successor, set()).add(mark)
478
478
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` by each recorded parent of their precursor."""
    for mark in markers:
        parents = mark[5]
        if parents is None:
            # parents were not recorded for this marker
            continue
        for parent in parents:
            children.setdefault(parent, set()).add(mark)
486
486
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    for mark in markers:
        successors = mark[1]
        # a marker must never list the null node as a successor
        if node.nullid in successors:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
497
497
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # format used when creating a brand new obsstore file; ignored
        # once on-disk data exists (see _version)
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every marker known locally
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # when markers are not parsed yet, answer with a cheap stat():
        # a file larger than the one-byte version header holds markers
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodes are expected to be 20-byte binary hashes
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple makes the marker hashable and deterministic
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # drop markers already in the store or duplicated in the input
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore (empty string when missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # version comes from the on-disk header when data exists,
        # otherwise from the configured default
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker parsed from disk; the basis for the
        # successors/precursors/children mappings below
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache for ``attr`` has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers):
        # extend the in-memory state with freshly written markers,
        # updating only the mappings that were already computed
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # markers with no successor (mark[1] empty) are prunes
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk up to the precursors of the markers found so far
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
715
715
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # default format for newly created obsstore files
    # developer config: format.obsstore-version
    version = ui.configint('format', 'obsstore-version')
    kwargs = {}
    if version is not None:
        kwargs['defaultformat'] = version
    # otherwise rely on the obsstore class default
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
731
731
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.

    Note: sorts ``versions`` in place (newest first), as callers rely on.
    """
    versions.sort(reverse=True)
    # pick the highest version understood by both sides
    return next((v for v in versions if v in formats), None)
743
743
# Maximum raw size of one pushkey chunk of markers.  Arbitrarily picked to
# fit into the 8K limit from the HTTP server; you have to take in account:
# - the version header
# - the base85 encoding overhead
_maxpayload = 5300
749
749
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # current chunk is full: start a new one
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    # note: parts are enumerated in reverse, so 'dump0' names the chunk
    # that was built last
    for idx, part in enumerate(reversed(parts)):
        # every chunk is a standalone stream: version header + markers
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
770
770
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
776
776
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``key`` is expected to look like 'dumpN' and ``new`` to hold a
    base85-encoded marker stream; ``old`` must be empty.  Returns True on
    success, False when the request is rejected.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            # obsolescence may have changed which revsets are valid
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
798
798
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated compatibility wrapper; use obsutil.allprecursors."""
    util.nouideprecwarn(
        'obsolete.allprecursors moved to obsutil.allprecursors', '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
804
804
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated compatibility wrapper; use obsutil.allsuccessors."""
    util.nouideprecwarn(
        'obsolete.allsuccessors moved to obsutil.allsuccessors', '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
809
809
def marker(repo, data):
    """Deprecated compatibility wrapper; use obsutil.marker."""
    repo.ui.deprecwarn('obsolete.marker moved to obsutil.marker', '4.3')
    return obsutil.marker(repo, data)
814
814
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated compatibility wrapper; use obsutil.getmarkers."""
    repo.ui.deprecwarn(
        'obsolete.getmarkers moved to obsutil.getmarkers', '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
819
819
def exclusivemarkers(repo, nodes):
    """Deprecated compatibility wrapper; use obsutil.exclusivemarkers."""
    repo.ui.deprecwarn(
        'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers', '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
824
824
def foreground(repo, nodes):
    """Deprecated compatibility wrapper; use obsutil.foreground."""
    repo.ui.deprecwarn(
        'obsolete.foreground moved to obsutil.foreground', '4.3')
    return obsutil.foreground(repo, nodes)
829
829
def successorssets(repo, initialnode, cache=None):
    """Deprecated compatibility wrapper; use obsutil.successorssets."""
    repo.ui.deprecwarn(
        'obsolete.successorssets moved to obsutil.successorssets', '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
834
834
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name not in cachefuncs:
            cachefuncs[name] = func
            return func
        # refuse to silently replace an existing computation function
        msg = "duplicated registration for volatileset '%s' (existing: %r)"
        raise error.ProgrammingError(msg % (name, cachefuncs[name]))
    return decorator
846
846
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        # compute lazily and memoize on the obsstore
        caches[name] = cachefuncs[name](repo)
    return caches[name]
857
857
# To keep things simple, the obsolescence caches are invalidated whenever:
#
# - a new changeset is added
# - the public phase boundary changes
# - obsolescence markers are added
# - the repository is stripped
def clearobscaches(repo):
    """Remove every obsolescence-related cache from a repo.

    All caches held on the obsstore are dropped, but only if the obsstore
    has already been loaded for this repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing.)"""
    # only clear caches if obsstore data was actually loaded for this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
875
875
def _mutablerevs(repo):
    """Return the set of mutable (draft or secret) revisions in the repo."""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
879
879
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """Compute the set of obsolete revisions.

    A revision is obsolete when it is mutable and its node is registered
    as the precursor of at least one obsolescence marker."""
    tonode = repo.changelog.node
    successors = repo.obsstore.successors
    return {r for r in _mutablerevs(repo) if tonode(r) in successors}
888
888
@cachefor('unstable')
def _computeunstableset(repo):
    """Compute the set of unstable revisions.

    Unstable revisions are mutable, non-obsolete revisions with at least
    one obsolete or unstable parent."""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    unstable = set()
    # walk in increasing revision order so a parent's status is always
    # settled before any of its children are examined
    for rev in sorted(candidates):
        if any(p in obsolete or p in unstable for p in parentrevs(rev)):
            unstable.add(rev)
    return unstable
905
905
@cachefor('suspended')
def _computesuspendedset(repo):
    """Compute the set of suspended revisions.

    Suspended revisions are obsolete revisions that still have
    non-obsolete (unstable) descendants."""
    unstableancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return {r for r in getrevs(repo, 'obsolete') if r in unstableancestors}
911
911
@cachefor('extinct')
def _computeextinctset(repo):
    """Compute the set of extinct revisions.

    Extinct revisions are the obsolete revisions that are not suspended,
    i.e. those without any non-obsolete descendant left."""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
916
916
917
917
@cachefor('bumped')
def _computebumpedset(repo):
    """Compute the set of bumped revisions.

    A revision is bumped when it tries to obsolete a revision that is
    already public (one of its precursors has the public phase)."""
    bumped = set()
    # hoist attribute lookups out of the loop
    getphase = repo._phasecache.phase  # would be faster to grab the full list
    publicphase = phases.public
    torev = repo.changelog.nodemap.get
    # only mutable, non-obsolete revisions are candidates
    for ctx in repo.set('(not public()) and (not obsolete())'):
        # (future) a cache of precursors may be worthwhile if split is common
        precursors = obsutil.allprecursors(repo.obsstore, [ctx.node()],
                                           ignoreflags=bumpedfix)
        for pnode in precursors:
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if prev is not None and getphase(repo, prev) <= publicphase:
                # found a public precursor: this changeset is bumped
                bumped.add(ctx.rev())
                break  # next candidate
    return bumped
940
940
@cachefor('divergent')
def _computedivergentset(repo):
    """Compute the set of divergent revisions.

    A revision is divergent when multiple surviving sets of successors
    compete to be the final replacement of one of its precursors."""
    divergent = set()
    obsstore = repo.obsstore
    successorscache = {}  # shared across candidates by obsutil.successorssets
    for ctx in repo.set('(not public()) - obsolete()'):
        pending = set(obsstore.precursors.get(ctx.node(), ()))
        seen = set()
        while pending:
            # a marker is a tuple whose first item is the precursor node
            precnode = pending.pop()[0]
            if precnode in seen:
                continue  # emergency cycle hanging prevention
            seen.add(precnode)
            if precnode not in successorscache:
                obsutil.successorssets(repo, precnode, successorscache)
            livesets = [s for s in successorscache[precnode] if s]
            if len(livesets) > 1:
                # several competing successors sets: divergence
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.precursors.get(precnode, ()))
    return divergent
965
965
966
966
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolescence markers between changesets in a repo.

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. The optional per-relation
    metadata dictionary applies to that marker only and is merged with the
    global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the metadata shared by every marker
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    trackoperation = repo.ui.configbool('experimental',
                                        'evolution.track-operation',
                                        False)
    if trackoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # Creating a marker invalidates the hidden cache, which would make
        # prec.parents() below trigger a recomputation on every iteration
        # (n^2 behavior). So validate and gather all marker arguments
        # first, then create the markers in a second pass.
        markerargs = []
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # pruned changeset: remember its parents in the marker
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            markerargs.append((nprec, nsucs, npare, localmetadata))

        for nprec, nsucs, npare, localmetadata in markerargs:
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now